VirtualBox

Changeset 31093 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Jul 26, 2010 8:02:01 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 64045
Message: PGM: More #PF cleanup, eliminating a few PGMGstGetPage() calls that are unnecessary now.
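The cleanup follows one pattern, visible in two hunks below: the #PF handler had already walked the guest page tables and stored the result in GstWalk, yet called PGMGstGetPage() a second time just to test the effective U/S or R/W bit. The new code reads GstWalk.Core.fEffectiveUS / fEffectiveRW instead. A minimal standalone sketch of the idea (GSTPTWALKCORE here is a hypothetical reduction of the real walk-result type, and the flag values are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the walk-result core filled in by the guest
     * page-table walker; the real VirtualBox type carries much more. */
    typedef struct GSTPTWALKCORE
    {
        bool fEffectiveUS;  /* U/S bit effective across all walk levels */
        bool fEffectiveRW;  /* R/W bit effective across all walk levels */
    } GSTPTWALKCORE;

    /* Old shape: walk the guest tables a second time to recover one flag. */
    static bool IsSupervisorOnlyOld(uint64_t fPageGst /* from PGMGstGetPage() */)
    {
        return !(fPageGst & /*X86_PTE_US*/ 0x4);
    }

    /* New shape: the first walk already answered the question. */
    static bool IsSupervisorOnlyNew(const GSTPTWALKCORE *pWalk)
    {
        return !pWalk->fEffectiveUS;
    }

    int main(void)
    {
        GSTPTWALKCORE Walk = { /*fEffectiveUS=*/false, /*fEffectiveRW=*/true };
        printf("supervisor-only: old=%d new=%d\n",
               IsSupervisorOnlyOld(0x3 /* P|RW, no US */),
               IsSupervisorOnlyNew(&Walk));
        return 0;
    }

Besides saving a redundant walk, this removes the awkward RT_SUCCESS(rc) handling for a lookup that could not tell the handler anything the first walk did not.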

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (revision 31092)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (revision 31093)
@@ -610,7 +610,6 @@
             {
                /* Check by physical address. */
-                unsigned        iPage;
-                rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK),
-                                                     &pCur, &iPage);
+                unsigned iPage;
+                rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK), &pCur, &iPage);
                 Assert(RT_SUCCESS(rc) || !pCur);
                 if (    pCur
     
@@ -734,5 +733,4 @@
         /*
          * Page is not present in our page tables. Try to sync it!
-         * BTW, fPageShw is invalid in this branch!
          */
         if (uErr & X86_TRAP_PF_US)
     
@@ -768,80 +766,73 @@
 
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0)
-        if (CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
-        {
-            /** @todo It's not necessary to repeat this here, GstWalk has
-             *        all the information. */
-            uint64_t fPageGst;
-            rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
-            if (    RT_SUCCESS(rc)
-                && !(fPageGst & X86_PTE_US))
+        if (   !GstWalk.Core.fEffectiveUS
+            && CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
+        {
+            /* Note: Can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU. */
+            if (    pvFault == (RTGCPTR)pRegFrame->eip
+                ||  pvFault - pRegFrame->eip < 8    /* instruction crossing a page boundary */
+#    ifdef CSAM_DETECT_NEW_CODE_PAGES
+                ||  (   !PATMIsPatchGCAddr(pVM, pRegFrame->eip)
+                     && CSAMDoesPageNeedScanning(pVM, pRegFrame->eip))   /* any new code we encounter here */
+#    endif /* CSAM_DETECT_NEW_CODE_PAGES */
+               )
             {
-                /* Note: Can't check for X86_TRAP_ID bit, because that requires execute disable support on the CPU. */
-                if (    pvFault == (RTGCPTR)pRegFrame->eip
-                    ||  pvFault - pRegFrame->eip < 8    /* instruction crossing a page boundary */
+                LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
+                rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
+                if (rc != VINF_SUCCESS)
+                {
+                    /*
+                     * CSAM needs to perform a job in ring 3.
+                     *
+                     * Sync the page before going to the host context; otherwise we'll end up in a loop if
+                     * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
+                     */
+                    LogFlow(("CSAM ring 3 job\n"));
+                    int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
+                    AssertRC(rc2);
+
+                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
+                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2CSAM; });
+                    return rc;
+                }
+            }
 #    ifdef CSAM_DETECT_NEW_CODE_PAGES
-                    ||  (   !PATMIsPatchGCAddr(pVM, pRegFrame->eip)
-                         && CSAMDoesPageNeedScanning(pVM, pRegFrame->eip))   /* any new code we encounter here */
-#    endif /* CSAM_DETECT_NEW_CODE_PAGES */
-                   )
+            else if (    uErr == X86_TRAP_PF_RW
+                     &&  pRegFrame->ecx >= 0x100         /* early check for movswd count */
+                     &&  pRegFrame->ecx < 0x10000)
+            {
+                /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
+                 * to detect loading of new code pages.
+                 */
+
+                /*
+                 * Decode the instruction.
+                 */
+                RTGCPTR PC;
+                rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs,
+                                                  &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
+                if (rc == VINF_SUCCESS)
                 {
-                    LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
-                    rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
-                    if (rc != VINF_SUCCESS)
+                    PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
+                    uint32_t     cbOp;
+                    rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, pDis, &cbOp);
+
+                    /* For now we'll restrict this to rep movsw/d instructions */
+                    if (    rc == VINF_SUCCESS
+                        &&  pDis->pCurInstr->opcode == OP_MOVSWD
+                        &&  (pDis->prefix & PREFIX_REP))
                     {
-                        /*
-                         * CSAM needs to perform a job in ring 3.
-                         *
-                         * Sync the page before going to the host context; otherwise we'll end up in a loop if
-                         * CSAM fails (e.g. instruction crosses a page boundary and the next page is not present)
-                         */
-                        LogFlow(("CSAM ring 3 job\n"));
-                        int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr);
-                        AssertRC(rc2);
-
-                        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
-                        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2CSAM; });
-                        return rc;
+                        CSAMMarkPossibleCodePage(pVM, pvFault);
                     }
                 }
-#    ifdef CSAM_DETECT_NEW_CODE_PAGES
-                else if (    uErr == X86_TRAP_PF_RW
-                         &&  pRegFrame->ecx >= 0x100         /* early check for movswd count */
-                         &&  pRegFrame->ecx < 0x10000)
-                {
-                    /* In case of a write to a non-present supervisor shadow page, we'll take special precautions
-                     * to detect loading of new code pages.
-                     */
-
-                    /*
-                     * Decode the instruction.
-                     */
-                    RTGCPTR PC;
-                    rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs,
-                                                      &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
-                    if (rc == VINF_SUCCESS)
-                    {
-                        PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
-                        uint32_t     cbOp;
-                        rc = EMInterpretDisasOneEx(pVM, pVCpu, PC, pRegFrame, pDis, &cbOp);
-
-                        /* For now we'll restrict this to rep movsw/d instructions */
-                        if (    rc == VINF_SUCCESS
-                            &&  pDis->pCurInstr->opcode == OP_MOVSWD
-                            &&  (pDis->prefix & PREFIX_REP))
-                        {
-                            CSAMMarkPossibleCodePage(pVM, pvFault);
-                        }
-                    }
-                }
+            }
 #    endif  /* CSAM_DETECT_NEW_CODE_PAGES */
 
-                /*
-                 * Mark this page as safe.
-                 */
-                /** @todo not correct for pages that contain both code and data!! */
-                Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
-                CSAMMarkPage(pVM, pvFault, true);
-            }
+            /*
+             * Mark this page as safe.
+             */
+            /** @todo not correct for pages that contain both code and data!! */
+            Log2(("CSAMMarkPage %RGv; scanned=%d\n", pvFault, true));
+            CSAMMarkPage(pVM, pvFault, true);
         }
 #   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
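Commentary on the hunk above: the guard that classifies the fault as an instruction fetch accepts pvFault == eip, or a fault within the first 8 bytes past eip (an instruction straddling a page boundary). A self-contained sketch of that heuristic, using plain uint32_t addresses rather than the VMM types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A #PF is treated as a code fetch if it hits EIP itself or falls within
     * the first few bytes after it (an instruction crossing a page boundary).
     * The constant 8 mirrors the diff; it is a heuristic, not the
     * architectural 15-byte instruction-length maximum. */
    static bool IsLikelyCodeFetchFault(uint32_t uFaultAddr, uint32_t uEip)
    {
        return uFaultAddr == uEip
            || uFaultAddr - uEip < 8;   /* unsigned wrap rules out addresses below EIP */
    }

    int main(void)
    {
        /* Instruction starts 3 bytes before a page boundary; fault on the next page. */
        printf("%d\n", IsLikelyCodeFetchFault(0x00401000, 0x00400ffd)); /* 1 */
        printf("%d\n", IsLikelyCodeFetchFault(0x00402000, 0x00400ffd)); /* 0 */
        return 0;
    }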
     
@@ -862,15 +853,19 @@
     {
         /*
-         * Write protected pages are make writable when the guest makes the first
-         * write to it. This happens for pages that are shared, write monitored
-         * and not yet allocated.
+         * Write protected pages are made writable when the guest makes the
+         * first write to it.  This happens for pages that are shared, write
+         * monitored or not yet allocated.
          *
-         * Also, a side effect of not flushing global PDEs are out of sync pages due
-         * to physical monitored regions, that are no longer valid.
+         * We may also end up here when CR0.WP=0 in the guest.
+         *
+         * Also, a side effect of not flushing global PDEs are out of sync
+         * pages due to physical monitored regions, that are no longer valid.
          * Assume for now it only applies to the read/write flag.
          */
-        if (    RT_SUCCESS(rc)
-            &&  (uErr & X86_TRAP_PF_RW))
-        {
+        if (uErr & X86_TRAP_PF_RW)
+        {
+            /*
+             * Check if it is a read-only page.
+             */
             if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
             {
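Commentary on the hunk above: a write fault lands in this branch when the backing page is not in the ALLOCATED state, i.e. it is still shared, write-monitored, or zero-backed, and must be given private writable backing on first write. A hypothetical model of that dispatch (the MY_PAGE_STATE_* enum imitates PGM's state names; the real handling is done by PGM itself, not by this sketch):

    #include <stdio.h>

    /* Hypothetical model of the page states behind PGM_PAGE_GET_STATE(). */
    typedef enum PGMPAGESTATE
    {
        MY_PAGE_STATE_ZERO,            /* backed by the shared all-zero page */
        MY_PAGE_STATE_SHARED,          /* deduplicated, shared backing */
        MY_PAGE_STATE_WRITE_MONITORED, /* writes tracked by a monitor */
        MY_PAGE_STATE_ALLOCATED        /* private writable RAM */
    } PGMPAGESTATE;

    /* A first write to anything but an ALLOCATED page must first replace or
     * reconfigure the backing before the write can be let through. */
    static const char *HandleFirstWrite(PGMPAGESTATE enmState)
    {
        if (enmState != MY_PAGE_STATE_ALLOCATED)
            return "make page writable (allocate/unshare/stop monitoring), then resync";
        return "page already writable; fault has another cause";
    }

    int main(void)
    {
        printf("%s\n", HandleFirstWrite(MY_PAGE_STATE_SHARED));
        printf("%s\n", HandleFirstWrite(MY_PAGE_STATE_ALLOCATED));
        return 0;
    }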
     
@@ -883,4 +878,5 @@
                 {
                     AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
+                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                     return rc;
                 }
     
@@ -890,24 +886,19 @@
 
 #   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-            /* Check to see if we need to emulate the instruction as X86_CR0_WP has been cleared. */
-            if (    CPUMGetGuestCPL(pVCpu, pRegFrame) == 0
-                &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG)
+            /*
+             * Check to see if we need to emulate the instruction if CR0.WP=0.
+             */
+            if (    !GstWalk.Core.fEffectiveRW
+                &&  (CPUMGetGuestCR0(pVCpu) & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG
+                &&  CPUMGetGuestCPL(pVCpu, pRegFrame) == 0)
             {
                 Assert((uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_P)) == (X86_TRAP_PF_RW | X86_TRAP_PF_P));
-                /** @todo It's not necessary to repeat this here, GstWalk has
-                 *        all the information. */
-                uint64_t fPageGst;
-                rc = PGMGstGetPage(pVCpu, pvFault, &fPageGst, NULL);
-                if (    RT_SUCCESS(rc)
-                    && !(fPageGst & X86_PTE_RW))
-                {
-                    rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
-                    if (RT_SUCCESS(rc))
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulInRZ);
-                    else
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulToR3);
-                    return rc;
-                }
-                AssertMsg(RT_SUCCESS(rc), ("Unexpected r/w page %RGv flag=%x rc=%Rrc\n", pvFault, (uint32_t)fPageGst, rc));
+                rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
+                if (RT_SUCCESS(rc))
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulInRZ);
+                else
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulToR3);
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
+                return rc;
             }
 #   endif
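Commentary on the hunk above: the masked compare (CR0 & (X86_CR0_WP | X86_CR0_PG)) == X86_CR0_PG selects both bits at once and requires PG set with WP clear, i.e. paging enabled but supervisor write protection off. In that mode a ring-0 write to a read-only guest page is architecturally legal, so the shadow-paging fault must be resolved by interpreting the instruction. A small sketch of the predicate (CR0 bit values are the architectural ones; the parameters are stand-ins for this sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_CR0_PG  UINT32_C(0x80000000)  /* paging enabled */
    #define MY_CR0_WP  UINT32_C(0x00010000)  /* supervisor write protect */

    /* Mirrors the masked compare in the hunk above: both bits are selected
     * in one AND, and the result must show PG set and WP clear. */
    static bool MustEmulateWpZeroWrite(uint32_t cr0, unsigned cpl, bool fEffectiveRW)
    {
        return !fEffectiveRW                                  /* guest PTE is read-only */
            && (cr0 & (MY_CR0_WP | MY_CR0_PG)) == MY_CR0_PG   /* paging on, WP off      */
            && cpl == 0;                                      /* supervisor access      */
    }

    int main(void)
    {
        printf("%d\n", MustEmulateWpZeroWrite(MY_CR0_PG, 0, false));             /* 1 */
        printf("%d\n", MustEmulateWpZeroWrite(MY_CR0_PG | MY_CR0_WP, 0, false)); /* 0 */
        return 0;
    }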
     
@@ -919,4 +910,6 @@
 
             /*
+             * Sync the page.
+             *
              * Note: Do NOT use PGM_SYNC_NR_PAGES here. That only works if the
              *       page is not present, which is not true in this case.
     
@@ -930,6 +923,6 @@
             {
                /*
-                * Page was successfully synced, return to guest.
-                * First invalidate the page as it might be in the TLB.
+                * Page was successfully synced, return to guest but invalidate
+                * the TLB first as the page is very likely to be in it.
                 */
 #   if PGM_SHW_TYPE == PGM_TYPE_EPT
     
@@ -956,7 +949,7 @@
             }
         }
-
-#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-#    ifdef VBOX_STRICT
+        /** @todo else: WTF are we here? */
+
+#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && defined(VBOX_STRICT)
         /*
          * Check for VMM page flags vs. Guest page flags consistency.
     
@@ -985,10 +978,11 @@
         else
             AssertMsgFailed(("PGMGCGetPage rc=%Rrc\n", rc));
-#    endif /* VBOX_STRICT */
-#   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+#   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && VBOX_STRICT */
     }
     STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
 
-    /** @todo This point is never really reached, is it? */
+    /** @todo This point is only ever reached when something goes awry.  The
+     *        conclusion here is wrong, it is not a guest trap!  Will fix in
+     *        a bit... */
 
 #  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)