VirtualBox

Changeset 31090 in vbox


Timestamp: Jul 26, 2010 6:51:04 AM
Author: vboxsync
Message: PGMAllBth.h: Cleaned out the old code.
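(Concretely: the new guest page-table walk code, until now enabled by the temporary "#define PGM_WITH_GST_WALK" near the top of the file, becomes unconditional. The deletions below remove that #define, every #else / #ifndef PGM_WITH_GST_WALK fallback that re-derived pPDSrc/PdeSrc per guest paging mode, and a temporary "#if 1" block that cross-checked the walk's GCPhys against the old computation.)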

File: 1 edited

Legend: unchanged context lines are prefixed with a space, removed lines with "-", added lines with "+"; each "@@ -old +new @@" header gives the first shown line number of the hunk in r31089 and r31090 respectively.
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31089)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31090)

@@ -73 +73 @@
 #endif
 
-/* enables the new code. */
-#define PGM_WITH_GST_WALK
-
 #ifndef IN_RING3
 
-#ifdef PGM_WITH_GST_WALK
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 /**
@@ -129 +125 @@
 }
 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#endif /* PGM_WITH_GST_WALK */
 
 
@@ -157 +152 @@
     && PGM_SHW_TYPE != PGM_TYPE_NESTED    \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
-#ifdef PGM_WITH_GST_WALK
     int rc;
 
@@ -241 +235 @@
 #  endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
 
-#else  /* !PGM_WITH_GST_WALK */
-
-#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
-    /*
-     * Hide the instruction fetch trap indicator if NX isn't active.
-     */
-    /** @todo do this only when returning with a guest trap! */
-    if ((uErr & X86_TRAP_PF_ID) && !pVCpu->pgm.s.fNoExecuteEnabled)
-    {
-        uErr &= ~X86_TRAP_PF_ID;
-        TRPMSetErrorCode(pVCpu, uErr);
-    }
-#  endif
-
-    /*
-     * Get PDs.
-     */
-    int             rc;
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-#   if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const unsigned  iPDSrc = pvFault >> GST_PD_SHIFT;
-    PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
-
-#   elif PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
-
-#    if PGM_GST_TYPE == PGM_TYPE_PAE
-    unsigned        iPDSrc = 0;                 /* initialized to shut up gcc */
-    X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, pvFault, &iPDSrc, &PdpeSrc);
-
-#    elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    unsigned        iPDSrc = 0;                 /* initialized to shut up gcc */
-    PX86PML4E       pPml4eSrc = NULL;           /* ditto */
-    X86PDPE         PdpeSrc;
-    PGSTPD          pPDSrc;
-
-    pPDSrc = pgmGstGetLongModePDPtr(pVCpu, pvFault, &pPml4eSrc, &PdpeSrc, &iPDSrc);
-    Assert(pPml4eSrc);
-#    endif
-
-    /* Quick check for a valid guest trap. (PAE & AMD64) */
-    if (!pPDSrc)
-    {
-#    if PGM_GST_TYPE == PGM_TYPE_AMD64 && GC_ARCH_BITS == 64
-        LogFlow(("Trap0eHandler: guest PML4 %d not present CR3=%RGp\n", (int)((pvFault >> X86_PML4_SHIFT) & X86_PML4_MASK), CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
-#    else
-        LogFlow(("Trap0eHandler: guest iPDSrc=%u not present CR3=%RGp\n", iPDSrc, CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK));
-#    endif
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-        TRPMSetErrorCode(pVCpu, uErr);
-        return VINF_EM_RAW_GUEST_TRAP;
-    }
-#   endif
-
-#  else  /* !PGM_WITH_PAGING */
-    PGSTPD          pPDSrc = NULL;
-    const unsigned  iPDSrc = 0;
-#  endif /* !PGM_WITH_PAGING */
-
-# if !defined(PGM_WITHOUT_MAPPINGS) && ((PGM_GST_TYPE == PGM_TYPE_32BIT) || (PGM_GST_TYPE == PGM_TYPE_PAE))
-    /*
-     * Check for write conflicts with our hypervisor mapping early on. If the guest happens to access a non-present page,
-     * where our hypervisor is currently mapped, then we'll create a #PF storm in the guest.
-     */
-    if (    (uErr & (X86_TRAP_PF_P | X86_TRAP_PF_RW)) == (X86_TRAP_PF_P | X86_TRAP_PF_RW)
-        &&  MMHyperIsInsideArea(pVM, pvFault))
-    {
-        /* Force a CR3 sync to check for conflicts and emulate the instruction. */
-        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
-        return VINF_EM_RAW_EMULATE_INSTR;
-    }
-# endif
-
-    /*
-     * First check for a genuine guest page fault.
-     */
-    /** @todo This duplicates the page table walk we're doing below. Need to
-     *        find some way to avoid this double work, probably by caching
-     *        the data. */
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDSrc->a[iPDSrc], pvFault);
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-    if (rc == VINF_EM_RAW_GUEST_TRAP)
-    {
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-        return rc;
-    }
-#  endif /* PGM_WITH_PAGING */
-#endif /* !PGM_WITH_GST_WALK */
-
     /* Take the big lock now. */
     *pfLockTaken = true;
@@ -339 +242 @@
      * Fetch the guest PDE, PDPE and PML4E.
      */
-#ifndef PGM_WITH_GST_WALK
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-#  else
-    GSTPDE PdeSrc;
-    PdeSrc.u            = 0; /* faked so we don't have to #ifdef everything */
-    PdeSrc.n.u1Present  = 1;
-    PdeSrc.n.u1Write    = 1;
-    PdeSrc.n.u1Accessed = 1;
-    PdeSrc.n.u1User     = 1;
-#  endif
-
-#endif /* !PGM_WITH_GST_WALK */
 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned  iPDDst = pvFault >> SHW_PD_SHIFT;
@@ -360 +250 @@
 
     PX86PDPAE       pPDDst;
-#ifdef PGM_WITH_GST_WALK
 #   if PGM_GST_TYPE == PGM_TYPE_PAE
     rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, GstWalk.Pdpe.u, &pPDDst);
@@ -366 +255 @@
     rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, X86_PDPE_P, &pPDDst);       /* RW, US and A are reserved in PAE mode. */
 #   endif
-#else
-#    if PGM_GST_TYPE != PGM_TYPE_PAE
-    X86PDPE         PdpeSrc;
-
-    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-    PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
-#    endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, PdpeSrc.u, &pPDDst);
-#endif
     if (rc != VINF_SUCCESS)
     {
@@ -385 +265 @@
     const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
     PX86PDPAE       pPDDst;
-#ifdef PGM_WITH_GST_WALK
 #   if PGM_GST_TYPE == PGM_TYPE_PROT  /* (AMD-V nested paging) */
     rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A,
-                                 X86_PDPE_P  | X86_PDPE_RW  | X86_PDPE_US | X86_PDPE_A, &pPDDst);
+                                X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A, &pPDDst);
 #   else
     rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, GstWalk.Pml4e.u, GstWalk.Pdpe.u, &pPDDst);
 #   endif
-#else
-#   if PGM_GST_TYPE == PGM_TYPE_PROT
-    /* AMD-V nested paging */
-    X86PML4E        Pml4eSrc;
-    X86PDPE         PdpeSrc;
-    PX86PML4E       pPml4eSrc = &Pml4eSrc;
-
-    /* Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
-#   endif
-
-    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
-#endif /* !PGM_WITH_GST_WALK */
     if (rc != VINF_SUCCESS)
     {
@@ -435 +300 @@
          */
         STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-#ifdef PGM_WITH_GST_WALK
         rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], GstWalk.pPde, pvFault);
-#else
-        rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-#endif
         STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
@@ -448 +309 @@
             return VINF_SUCCESS;
         }
-#ifdef PGM_WITH_GST_WALK
         AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u || GstWalk.pPte->u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
         AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
-#endif
     }
 
@@ -471 +330 @@
      *
      */
-#ifdef PGM_WITH_GST_WALK
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     Assert(GstWalk.Pde.n.u1Present);
 #  endif
-#else
-    Assert(PdeSrc.n.u1Present);
-#endif
     if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
         &&  !pPDDst->a[iPDDst].n.u1Present
@@ -484 +339 @@
         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
         STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
-#ifdef PGM_WITH_GST_WALK
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         LogFlow(("=>SyncPT %04x = %08RX64\n", (pvFault >> GST_PD_SHIFT) & GST_PD_MASK, (uint64_t)GstWalk.Pde.u));
@@ -492 +346 @@
         rc = PGM_BTH_NAME(SyncPT)(pVCpu, 0, NULL, pvFault);
 #  endif
-#else  /* !PGM_WITH_GST_WALK */
-        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
-        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
-#endif  /* !PGM_WITH_GST_WALK */
         if (RT_SUCCESS(rc))
         {
@@ -501 +351 @@
             return rc;
         }
-#ifdef PGM_WITH_GST_WALK
         Log(("SyncPT: %RGv failed!! rc=%Rrc\n", pvFault, rc));
-#else
-        Log(("SyncPT: %d failed!! rc=%d\n", iPDSrc, rc));
-#endif
         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
         STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
@@ -536 +382 @@
                     unsigned iPT = pMapping->cb >> GST_PD_SHIFT;
                     while (iPT-- > 0)
-#ifdef PGM_WITH_GST_WALK
                         if (GstWalk.pPde[iPT].n.u1Present)
-#else
-                        if (pPDSrc->a[iPDSrc + iPT].n.u1Present)
-#endif
                         {
                             STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eConflicts);
@@ -595 +437 @@
      * in page tables which the guest believes to be present.
      */
-#ifdef PGM_WITH_GST_WALK
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     Assert(GstWalk.Pde.n.u1Present);
 #  endif
-#else
-    Assert(PdeSrc.n.u1Present);
-#endif
-    {
-#ifdef PGM_WITH_GST_WALK
+    {
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
         RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
-#if 1
-        RTGCPHYS GCPhys3;
-        if (    GstWalk.Pde.b.u1Size &&  GST_IS_PSE_ACTIVE(pVCpu))
-            GCPhys3 = GST_GET_PDE_BIG_PG_GCPHYS(pVM, GstWalk.Pde)
-                     | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
-        else
-            GCPhys3 = GstWalk.Pte.u & GST_PTE_PG_MASK;
-        Assert(GCPhys3 == GCPhys);
-#endif
 #  else
         RTGCPHYS GCPhys = (RTGCPHYS)pvFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
 #  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#else
-        RTGCPHYS    GCPhys = NIL_RTGCPHYS;
-
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        if (    PdeSrc.b.u1Size
-            &&  GST_IS_PSE_ACTIVE(pVCpu))
-            GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(pVM, PdeSrc)
-                    | ((RTGCPHYS)pvFault & (GST_BIG_PAGE_OFFSET_MASK ^ PAGE_OFFSET_MASK));
-        else
-        {
-            PGSTPT pPTSrc;
-            rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
-            if (RT_SUCCESS(rc))
-            {
-                unsigned iPTESrc = (pvFault >> GST_PT_SHIFT) & GST_PT_MASK;
-                if (pPTSrc->a[iPTESrc].n.u1Present)
-                    GCPhys = pPTSrc->a[iPTESrc].u & GST_PTE_PG_MASK;
-            }
-        }
-#  else
-        /* No paging so the fault address is the physical address */
-        GCPhys = (RTGCPHYS)(pvFault & ~PAGE_OFFSET_MASK);
-#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-#endif /* !PGM_WITH_GST_WALK */
 
         /*
         * If we have a GC address we'll check if it has any flags set.
         */
-#ifndef PGM_WITH_GST_WALK
-        if (GCPhys != NIL_RTGCPHYS)
-#endif
        {
            STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
@@ -678 +479 @@
                                 && !(uErr & X86_TRAP_PF_P))
                             {
-#ifdef PGM_WITH_GST_WALK
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -684 +484 @@
                                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 #   endif
-#else
-                                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                                 if (    RT_FAILURE(rc)
                                     || !(uErr & X86_TRAP_PF_RW)
@@ -754 +551 @@
                             && !(uErr & X86_TRAP_PF_P))
                         {
-#ifdef PGM_WITH_GST_WALK
                             rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#else
-                            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                             if (    RT_FAILURE(rc)
                                 ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
@@ -855 +648 @@
                         &&  !(uErr & X86_TRAP_PF_P))
                     {
-#ifdef PGM_WITH_GST_WALK
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                         rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -861 +653 @@
                         rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 #  endif
-#else
-                        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                         if (    RT_FAILURE(rc)
                             ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
@@ -977 +766 @@
                 uint64_t   fPageGst2;
                 PGMGstGetPage(pVCpu, pvFault, &fPageGst2, &GCPhys2);
-#ifdef PGM_WITH_GST_WALK
-#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+#    if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                 Log(("Page out of sync: %RGv eip=%08x PdeSrc.US=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
                      pvFault, pRegFrame->eip, GstWalk.Pde.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-#   else
+#    else
                 Log(("Page out of sync: %RGv eip=%08x fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
                      pvFault, pRegFrame->eip, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-#   endif
-#else
-                Log(("Page out of sync: %RGv eip=%08x PdeSrc.n.u1User=%d fPageGst2=%08llx GCPhys2=%RGp scan=%d\n",
-                     pvFault, pRegFrame->eip, PdeSrc.n.u1User, fPageGst2, GCPhys2, CSAMDoesPageNeedScanning(pVM, pRegFrame->eip)));
-#endif
+#    endif
 #   endif /* LOG_ENABLED */
 
@@ -1019 +803 @@
                                  */
                                 LogFlow(("CSAM ring 3 job\n"));
-#ifdef PGM_WITH_GST_WALK
                                 int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
-#else
-                                int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
-#endif
                                 AssertRC(rc2);
 
@@ -1072 +852 @@
                 }
 #   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
-#ifdef PGM_WITH_GST_WALK
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
@@ -1078 +857 @@
                 rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
 #   endif
-#else
-                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#endif
                 if (RT_SUCCESS(rc))
                 {
@@ -1150 +926 @@
                      *       page is not present, which is not true in this case.
                      */
-#ifdef PGM_WITH_GST_WALK
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
@@ -1156 +931 @@
                     rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, 1, uErr);
 #   endif
-#else
-                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
-#endif
                     if (RT_SUCCESS(rc))
                     {
@@ -1176 +948 @@
                         {
                             rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, &GCPhys2);
-#if defined(PGM_WITH_GST_WALK) && PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-                            AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64 EffRW=%d EffUS=%d uErr=%RGp cr4=%RX64 pvFault=%RGv\n", rc, fPageGst, GstWalk.Core.fEffectiveRW, GstWalk.Core.fEffectiveUS, uErr, CPUMGetGuestCR0(pVCpu), pvFault ));
-#else
                             AssertMsg(RT_SUCCESS(rc) && (fPageGst & X86_PTE_RW), ("rc=%Rrc fPageGst=%RX64\n", rc, fPageGst));
-#endif
                             LogFlow(("Obsolete physical monitor page out of sync %RGv - phys %RGp flags=%08llx\n", pvFault, GCPhys2, (uint64_t)fPageGst));
                         }
@@ -1227 +995 @@
 #  endif /* PGM_OUT_OF_SYNC_IN_GC */
         }
-#ifndef PGM_WITH_GST_WALK
-        else /* GCPhys == NIL_RTGCPHYS */
-        {
-            /*
-             * Page not present in Guest OS or invalid page table address.
-             * This is potential virtual page access handler food.
-             *
-             * For the present we'll say that our access handlers don't
-             * work for this case - we've already discarded the page table
-             * not present case which is identical to this.
-             *
-             * When we perchance find we need this, we will probably have AVL
-             * trees (offset based) to operate on and we can measure their speed
-             * agains mapping a page table and probably rearrange this handling
-             * a bit. (Like, searching virtual ranges before checking the
-             * physical address.)
-             */
-        }
-#endif
     }
     /** @todo This point is never really reached. Clean up later!  */
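
For readers coming to this file cold, the shape of the retained code may be easier to see outside the PGM_BTH template. Below is a minimal, self-contained C sketch of the pattern: one guest page-table walk at the top of the #PF handler captures the PDE, the PTE and the translated physical address, and every later step (the dirty-bit check, SyncPage, the handler lookup) reuses that state instead of re-deriving the entries under per-paging-mode #ifdefs, which is exactly what the deleted branches did. All names here (SKWALK, skWalk, SK_*) are invented for illustration and assume plain 32-bit non-PAE paging with one shared page table; the real walk state is the GstWalk variable visible in the diff, which additionally carries Pml4e/Pdpe, live-entry pointers pPde/pPte, big-page handling and effective access bits.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define SK_P        UINT32_C(0x00000001)    /* present bit (x86 PDE/PTE bit 0) */
#define SK_PG_MASK  UINT32_C(0xFFFFF000)    /* 4 KiB page-frame mask */

typedef struct SKWALK
{
    uint32_t Pde;        /* guest PDE the walk went through */
    uint32_t Pte;        /* guest PTE the walk went through */
    uint32_t GCPhys;     /* translated guest-physical address */
    bool     fSucceeded;
} SKWALK;

/* One walk at the top of the fault handler fills the structure... */
static bool skWalk(const uint32_t aPD[1024], const uint32_t aPT[1024],
                   uint32_t GCPtr, SKWALK *pWalk)
{
    pWalk->fSucceeded = false;
    pWalk->Pde = aPD[GCPtr >> 22];              /* top 10 bits index the PD */
    if (!(pWalk->Pde & SK_P))
        return false;                           /* not present: genuine guest #PF */
    /* (Real code maps the PT that the PDE points to; one shared PT here.) */
    pWalk->Pte = aPT[(GCPtr >> 12) & 0x3FF];    /* next 10 bits index the PT */
    if (!(pWalk->Pte & SK_P))
        return false;
    pWalk->GCPhys = (pWalk->Pte & SK_PG_MASK) | (GCPtr & 0xFFF);
    pWalk->fSucceeded = true;
    return true;
}

int main(void)
{
    static uint32_t aPD[1024], aPT[1024];
    aPD[0x300] = SK_P;                          /* PDE covering GCPtr 0xC00xxxxx */
    aPT[0x055] = UINT32_C(0x12345000) | SK_P;   /* PTE for GCPtr 0xC0055xxx */

    SKWALK Walk;
    uint32_t const GCPtrFault = UINT32_C(0xC0055ABC);
    if (skWalk(aPD, aPT, GCPtrFault, &Walk))
        /* ...and later steps reuse Walk.Pde/Walk.Pte/Walk.GCPhys instead of
           re-walking the tables under per-mode #ifdefs. */
        printf("GCPtr %#x -> GCPhys %#x (Pde=%#x Pte=%#x)\n",
               GCPtrFault, Walk.GCPhys, Walk.Pde, Walk.Pte);
    else
        printf("guest page fault at %#x\n", GCPtrFault);
    return 0;
}

Compiled as C99, this prints "GCPtr 0xc0055abc -> GCPhys 0x12345abc"; clearing either present bit instead takes the guest-fault path, mirroring the VINF_EM_RAW_GUEST_TRAP return seen in the deleted code.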