VirtualBox

Changeset 18992 in vbox for trunk/src/VBox/VMM


Timestamp: Apr 17, 2009 1:51:56 PM
Author:    vboxsync
Message:   More PGM api changes
Location:  trunk/src/VBox/VMM
Files:     23 edited

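Nearly every hunk below applies the same transformation: PGM entry points that used to take both a PVM and a PVMCPU handle now take only the PVMCPU, and the VM handle is derived from the VCPU inside the function when it is actually needed. A minimal sketch of the before/after calling convention follows; the struct layouts and function bodies are simplified stand-ins for illustration, not the real VirtualBox definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real VM / VMCPU structures. */
    typedef struct VM    VM,    *PVM;
    typedef struct VMCPU VMCPU, *PVMCPU;
    struct VM    { uint32_t cCpus; };
    struct VMCPU { PVM pVMR3; };              /* per-context back-pointer to the VM */

    /* r18988 and earlier: both handles were threaded through explicitly. */
    static int PGMPrefetchPage_Old(PVM pVM, PVMCPU pVCpu, uint64_t GCPtrPage)
    {
        (void)pVM; (void)pVCpu;
        printf("old API: prefetch %#llx\n", (unsigned long long)GCPtrPage);
        return 0;                             /* VINF_SUCCESS */
    }

    /* r18992: only the VCPU handle is passed; the VM handle is recovered
     * inside the body (the real code uses pVCpu->CTX_SUFF(pVM)). */
    static int PGMPrefetchPage_New(PVMCPU pVCpu, uint64_t GCPtrPage)
    {
        PVM pVM = pVCpu->pVMR3;
        (void)pVM;
        printf("new API: prefetch %#llx\n", (unsigned long long)GCPtrPage);
        return 0;
    }

    int main(void)
    {
        VM    vm   = { 1 };
        VMCPU vcpu = { &vm };
        PGMPrefetchPage_Old(&vm, &vcpu, 0x1000);
        PGMPrefetchPage_New(&vcpu, 0x1000);   /* callers just drop the pVM argument */
        return 0;
    }
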
  • trunk/src/VBox/VMM/EM.cpp

    r18988 r18992  
    23692369         */
    23702370        case VINF_PGM_CHANGE_MODE:
    2371             rc = PGMChangeMode(pVM, pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
     2371            rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    23722372            if (rc == VINF_SUCCESS)
    23732373                rc = VINF_EM_RESCHEDULE;
     
    26072607            && CSAMIsEnabled(pVM))
    26082608        {
    2609             int rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2609            int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    26102610            if (RT_FAILURE(rc))
    26112611                return rc;
     
    26322632    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    26332633    {
    2634         int rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2634        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    26352635        if (RT_FAILURE(rc))
    26362636            return rc;
     
    26402640        /* Prefetch pages for EIP and ESP. */
    26412641        /** @todo This is rather expensive. Should investigate if it really helps at all. */
    2642         rc = PGMPrefetchPage(pVM, pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
     2642        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
    26432643        if (rc == VINF_SUCCESS)
    2644             rc = PGMPrefetchPage(pVM, pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
     2644            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
    26452645        if (rc != VINF_SUCCESS)
    26462646        {
     
    26502650                return rc;
    26512651            }
    2652             rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     2652            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    26532653            if (RT_FAILURE(rc))
    26542654                return rc;
  • trunk/src/VBox/VMM/PATM/CSAM.cpp

    r18988 r18992  
    16671667
    16681668        /* Prefetch it in case it's not there yet. */
    1669         rc = PGMPrefetchPage(pVM, pVCpu, GCPtr);
     1669        rc = PGMPrefetchPage(pVCpu, GCPtr);
    16701670        AssertRC(rc);
    16711671
     
    17791779
    17801780        /* Prefetch it in case it's not there yet. */
    1781         rc = PGMPrefetchPage(pVM, pVCpu, pPageAddrGC);
     1781        rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
    17821782        AssertRC(rc);
    17831783
     
    18001800
    18011801        /* Prefetch it in case it's not there yet. */
    1802         rc = PGMPrefetchPage(pVM, pVCpu, pPageAddrGC);
     1802        rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
    18031803        AssertRC(rc);
    18041804
     
    18231823    {
    18241824        /* Prefetch it in case it's not there yet. */
    1825         rc = PGMPrefetchPage(pVM, pVCpu, pPageAddrGC);
     1825        rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
    18261826        AssertRC(rc);
    18271827        /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
  • trunk/src/VBox/VMM/PGM.cpp

    r18988 r18992  
    19881988        PGM_SHW_PFN(Relocate, pVCpu)(pVCpu, offDelta);
    19891989        PGM_GST_PFN(Relocate, pVCpu)(pVCpu, offDelta);
    1990         PGM_BTH_PFN(Relocate, pVCpu)(pVM, pVCpu, offDelta);
     1990        PGM_BTH_PFN(Relocate, pVCpu)(pVCpu, offDelta);
    19911991    }
    19921992
     
    39033903            {
    39043904                case PGMMODE_32_BIT:
    3905                     rc2 = PGM_BTH_NAME_32BIT_REAL(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3905                    rc2 = PGM_BTH_NAME_32BIT_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
    39063906                    break;
    39073907                case PGMMODE_PAE:
    39083908                case PGMMODE_PAE_NX:
    3909                     rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3909                    rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
    39103910                    break;
    39113911                case PGMMODE_NESTED:
    3912                     rc2 = PGM_BTH_NAME_NESTED_REAL(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3912                    rc2 = PGM_BTH_NAME_NESTED_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
    39133913                    break;
    39143914                case PGMMODE_EPT:
    3915                     rc2 = PGM_BTH_NAME_EPT_REAL(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3915                    rc2 = PGM_BTH_NAME_EPT_REAL(Enter)(pVCpu, NIL_RTGCPHYS);
    39163916                    break;
    39173917                case PGMMODE_AMD64:
     
    39273927            {
    39283928                case PGMMODE_32_BIT:
    3929                     rc2 = PGM_BTH_NAME_32BIT_PROT(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3929                    rc2 = PGM_BTH_NAME_32BIT_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
    39303930                    break;
    39313931                case PGMMODE_PAE:
    39323932                case PGMMODE_PAE_NX:
    3933                     rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3933                    rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
    39343934                    break;
    39353935                case PGMMODE_NESTED:
    3936                     rc2 = PGM_BTH_NAME_NESTED_PROT(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3936                    rc2 = PGM_BTH_NAME_NESTED_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
    39373937                    break;
    39383938                case PGMMODE_EPT:
    3939                     rc2 = PGM_BTH_NAME_EPT_PROT(Enter)(pVM, pVCpu, NIL_RTGCPHYS);
     3939                    rc2 = PGM_BTH_NAME_EPT_PROT(Enter)(pVCpu, NIL_RTGCPHYS);
    39403940                    break;
    39413941                case PGMMODE_AMD64:
     
    39523952            {
    39533953                case PGMMODE_32_BIT:
    3954                     rc2 = PGM_BTH_NAME_32BIT_32BIT(Enter)(pVM, pVCpu, GCPhysCR3);
     3954                    rc2 = PGM_BTH_NAME_32BIT_32BIT(Enter)(pVCpu, GCPhysCR3);
    39553955                    break;
    39563956                case PGMMODE_PAE:
    39573957                case PGMMODE_PAE_NX:
    3958                     rc2 = PGM_BTH_NAME_PAE_32BIT(Enter)(pVM, pVCpu, GCPhysCR3);
     3958                    rc2 = PGM_BTH_NAME_PAE_32BIT(Enter)(pVCpu, GCPhysCR3);
    39593959                    break;
    39603960                case PGMMODE_NESTED:
    3961                     rc2 = PGM_BTH_NAME_NESTED_32BIT(Enter)(pVM, pVCpu, GCPhysCR3);
     3961                    rc2 = PGM_BTH_NAME_NESTED_32BIT(Enter)(pVCpu, GCPhysCR3);
    39623962                    break;
    39633963                case PGMMODE_EPT:
    3964                     rc2 = PGM_BTH_NAME_EPT_32BIT(Enter)(pVM, pVCpu, GCPhysCR3);
     3964                    rc2 = PGM_BTH_NAME_EPT_32BIT(Enter)(pVCpu, GCPhysCR3);
    39653965                    break;
    39663966                case PGMMODE_AMD64:
     
    39873987                case PGMMODE_PAE:
    39883988                case PGMMODE_PAE_NX:
    3989                     rc2 = PGM_BTH_NAME_PAE_PAE(Enter)(pVM, pVCpu, GCPhysCR3);
     3989                    rc2 = PGM_BTH_NAME_PAE_PAE(Enter)(pVCpu, GCPhysCR3);
    39903990                    break;
    39913991                case PGMMODE_NESTED:
    3992                     rc2 = PGM_BTH_NAME_NESTED_PAE(Enter)(pVM, pVCpu, GCPhysCR3);
     3992                    rc2 = PGM_BTH_NAME_NESTED_PAE(Enter)(pVCpu, GCPhysCR3);
    39933993                    break;
    39943994                case PGMMODE_EPT:
    3995                     rc2 = PGM_BTH_NAME_EPT_PAE(Enter)(pVM, pVCpu, GCPhysCR3);
     3995                    rc2 = PGM_BTH_NAME_EPT_PAE(Enter)(pVCpu, GCPhysCR3);
    39963996                    break;
    39973997                case PGMMODE_32_BIT:
     
    40134013                case PGMMODE_AMD64:
    40144014                case PGMMODE_AMD64_NX:
    4015                     rc2 = PGM_BTH_NAME_AMD64_AMD64(Enter)(pVM, pVCpu, GCPhysCR3);
     4015                    rc2 = PGM_BTH_NAME_AMD64_AMD64(Enter)(pVCpu, GCPhysCR3);
    40164016                    break;
    40174017                case PGMMODE_NESTED:
    4018                     rc2 = PGM_BTH_NAME_NESTED_AMD64(Enter)(pVM, pVCpu, GCPhysCR3);
     4018                    rc2 = PGM_BTH_NAME_NESTED_AMD64(Enter)(pVCpu, GCPhysCR3);
    40194019                    break;
    40204020                case PGMMODE_EPT:
    4021                     rc2 = PGM_BTH_NAME_EPT_AMD64(Enter)(pVM, pVCpu, GCPhysCR3);
     4021                    rc2 = PGM_BTH_NAME_EPT_AMD64(Enter)(pVCpu, GCPhysCR3);
    40224022                    break;
    40234023                case PGMMODE_32_BIT:
  • trunk/src/VBox/VMM/PGMBth.h

    r18927 r18992  
    2626__BEGIN_DECLS
    2727PGM_BTH_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
    28 PGM_BTH_DECL(int, Enter)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
    29 PGM_BTH_DECL(int, Relocate)(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta);
    30 
    31 PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
    32 PGM_BTH_DECL(int, SyncCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
    33 PGM_BTH_DECL(int, SyncPage)(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
    34 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
    35 PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
    36 PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
    37 PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
    38 PGM_BTH_DECL(int, MapCR3)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
    39 PGM_BTH_DECL(int, UnmapCR3)(PVM pVM, PVMCPU pVCpu);
     28PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
     29PGM_BTH_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
     30
     31PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
     32PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
     33PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
     34PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
     35PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
     36PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
     37PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
     38PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
     39PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
    4040__END_DECLS
    4141
     
    129129 * @param   GCPhysCR3   The physical address from the CR3 register.
    130130 */
    131 PGM_BTH_DECL(int, Enter)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
     131PGM_BTH_DECL(int, Enter)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
    132132{
    133133    /* Here we deal with allocation of the root shadow page table for real and protected mode during mode switches;
     
    139139      && (   PGM_GST_TYPE == PGM_TYPE_REAL   \
    140140          || PGM_GST_TYPE == PGM_TYPE_PROT))
     141
     142    PVM pVM   = pVCpu->pVMR3;
    141143
    142144    Assert(!HWACCMIsNestedPagingActive(pVM));
     
    202204 * @param   offDelta    The reloation offset.
    203205 */
    204 PGM_BTH_DECL(int, Relocate)(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta)
     206PGM_BTH_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta)
    205207{
    206208    /* nothing special to do here - InitData does the job. */
  • trunk/src/VBox/VMM/PGMGst.h

    r18988 r18992  
    9797     * Map and monitor CR3
    9898     */
    99     int rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu->pVMR3, pVCpu, GCPhysCR3);
     99    int rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    100100    return rc;
    101101}
     
    133133    int rc;
    134134
    135     rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVCpu->pVMR3, pVCpu);
     135    rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVCpu);
    136136    return rc;
    137137}
  • trunk/src/VBox/VMM/PGMInternal.h

    r18988 r18992  
    21112111     * @{
    21122112     */
    2113     DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta));
     2113    DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    21142114    /*                           no pfnR3BthTrap0eHandler */
    2115     DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2116     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2117     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2118     DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2119     DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2115    DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2116    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2117    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2118    DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2119    DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    21202120#ifdef VBOX_STRICT
    2121     DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2122 #endif
    2123     DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2124     DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
    2125 
    2126     DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2127     DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2128     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2129     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2130     DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2131     DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2121    DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2122#endif
     2123    DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2124    DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVMCPU pVCpu));
     2125
     2126    DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
     2127    DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2128    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2129    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2130    DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2131    DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    21322132#ifdef VBOX_STRICT
    2133     DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2134 #endif
    2135     DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2136     DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
    2137 
    2138     DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2139     DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2140     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2141     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2142     DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2143     DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2133    DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2134#endif
     2135    DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2136    DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));
     2137
     2138    DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
     2139    DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2140    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2141    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2142    DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2143    DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    21442144#ifdef VBOX_STRICT
    2145     DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2146 #endif
    2147     DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2148     DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
     2145    DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2146#endif
     2147    DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2148    DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    21492149    /** @} */
    21502150} PGMMODEDATA, *PPGMMODEDATA;
     
    26262626     * @{
    26272627     */
    2628     DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVM pVM, PVMCPU pVCpu, RTGCPTR offDelta));
     2628    DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    26292629    /*                           no pfnR3BthTrap0eHandler */
    2630     DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2631     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2632     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2633     DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2634     DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2635     DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2636     DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2637     DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
    2638 
    2639     DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2640     DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2641     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2642     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2643     DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2644     DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2645     DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2646     DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2647     DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
    2648 
    2649     DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2650     DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2651     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2652     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVM pVM, PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2653     DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2654     DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2655     DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2656     DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2657     DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVM pVM, PVMCPU pVCpu));
     2630    DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2631    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2632    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2633    DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2634    DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2635    DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2636    DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2637    DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVMCPU pVCpu));
     2638
     2639    DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
     2640    DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2641    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2642    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2643    DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2644    DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2645    DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2646    DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2647    DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVMCPU pVCpu));
     2648
     2649    DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
     2650    DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2651    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
     2652    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     2653    DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     2654    DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
     2655    DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
     2656    DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     2657    DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));
    26582658#if HC_ARCH_BITS == 64
    26592659    RTRCPTR                         alignment2; /**< structure size alignment. */
     
    29572957int             pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    29582958
    2959 int             pgmShwSyncPaePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
     2959int             pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
    29602960#ifndef IN_RC
    2961 int             pgmShwSyncLongModePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
    2962 #endif
    2963 int             pgmShwGetEPTPDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
     2961int             pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
     2962#endif
     2963int             pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
    29642964
    29652965PX86PD          pgmGstLazyMap32BitPD(PPGMCPU pPGM);
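
The PGMInternal.h hunks above change the signatures stored in the PGMMODEDATA callback table, which is how PGM dispatches to the shadow+guest ("both") paging-mode implementation selected for each VCPU; the PGM_BTH_PFN calls elsewhere in this changeset go through such a table. The sketch below illustrates the dispatch idea only, with invented names (DEMOMODEDATA, demoPrefetchPage) and simplified types rather than the real VirtualBox definitions.

    #include <stdint.h>

    typedef struct VMCPU VMCPU, *PVMCPU;

    /* A cut-down "mode data" table: one function pointer per operation, now
     * taking only the VCPU handle, matching the new signatures above. */
    typedef struct DEMOMODEDATA
    {
        int (*pfnBthSyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, int fGlobal);
        int (*pfnBthPrefetchPage)(PVMCPU pVCpu, uint64_t GCPtrPage);
    } DEMOMODEDATA;

    struct VMCPU
    {
        DEMOMODEDATA ModeData;   /* re-filled on every paging-mode switch (cf. PGMChangeMode) */
    };

    /* One concrete worker, e.g. the 32-bit shadow / 32-bit guest pairing. */
    static int demoBth32Bit32BitPrefetchPage(PVMCPU pVCpu, uint64_t GCPtrPage)
    {
        (void)pVCpu; (void)GCPtrPage;
        return 0;
    }

    /* Installed at mode-switch time, analogous to the Enter/InitData paths above. */
    static void demoSelect32Bit32Bit(PVMCPU pVCpu)
    {
        pVCpu->ModeData.pfnBthPrefetchPage = demoBth32Bit32BitPrefetchPage;
    }

    /* Generic entry point: pick the current mode's worker and call it with the
     * VCPU only -- roughly what PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, ...) does. */
    static int demoPrefetchPage(PVMCPU pVCpu, uint64_t GCPtrPage)
    {
        return pVCpu->ModeData.pfnBthPrefetchPage(pVCpu, GCPtrPage);
    }
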
  • trunk/src/VBox/VMM/PGMMap.cpp

    r18927 r18992  
    535535     * pending relocations because of these mappings have been resolved.
    536536     */
    537     PGMSyncCR3(pVM, pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
     537    PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
    538538
    539539    /*
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r18927 r18992  
    13891389        LogFlow(("emInterpretStosWD dest=%04X:%RGv (%RGv) cbSize=%d cTransfers=%x DF=%d\n", pRegFrame->es, GCOffset, GCDest, cbSize, cTransfers, pRegFrame->eflags.Bits.u1DF));
    13901390        /* Access verification first; we currently can't recover properly from traps inside this instruction */
    1391         rc = PGMVerifyAccess(pVM, pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
     1391        rc = PGMVerifyAccess(pVCpu, GCDest - ((offIncrement > 0) ? 0 : ((cTransfers-1) * cbSize)),
    13921392                             cTransfers * cbSize,
    13931393                             X86_PTE_RW | (CPUMGetGuestCPL(pVCpu, pRegFrame) == 3 ? X86_PTE_US : 0));
     
    18701870    LogFlow(("RC: EMULATE: invlpg %RGv\n", pAddrGC));
    18711871#endif
    1872     rc = PGMInvalidatePage(pVM, pVCpu, pAddrGC);
     1872    rc = PGMInvalidatePage(pVCpu, pAddrGC);
    18731873    if (    rc == VINF_SUCCESS
    18741874        ||  rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
     
    19131913    LogFlow(("RC: EMULATE: invlpg %RGv\n", addr));
    19141914#endif
    1915     rc = PGMInvalidatePage(pVM, pVCpu, addr);
     1915    rc = PGMInvalidatePage(pVCpu, addr);
    19161916    if (    rc == VINF_SUCCESS
    19171917        ||  rc == VINF_PGM_SYNC_CR3 /* we can rely on the FF */)
     
    20652065        {
    20662066            /* global flush */
    2067             rc = PGMFlushTLB(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
     2067            rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
    20682068            AssertRCReturn(rc, rc);
    20692069        }
     
    21002100            CPUMSetGuestEFER(pVCpu, msrEFER);
    21012101        }
    2102         rc2 = PGMChangeMode(pVM, pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
     2102        rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
    21032103        return rc2 == VINF_SUCCESS ? rc : rc2;
    21042104
     
    21132113        {
    21142114            /* flush */
    2115             rc = PGMFlushTLB(pVM, pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
     2115            rc = PGMFlushTLB(pVCpu, val, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
    21162116            AssertRCReturn(rc, rc);
    21172117        }
     
    21372137        {
    21382138            /* global flush */
    2139             rc = PGMFlushTLB(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
     2139            rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), true /* global */);
    21402140            AssertRCReturn(rc, rc);
    21412141        }
     
    21532153            VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    21542154
    2155         rc2 = PGMChangeMode(pVM, pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
     2155        rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
    21562156        return rc2 == VINF_SUCCESS ? rc : rc2;
    21572157
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    r18988 r18992  
    395395
    396396            /* Access verification first; we currently can't recover properly from traps inside this instruction */
    397             rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
     397            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
    398398            if (rc != VINF_SUCCESS)
    399399            {
     
    492492             */
    493493            /* Access verification first; we currently can't recover properly from traps inside this instruction */
    494             rc = PGMVerifyAccess(pVM, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
     494            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    495495            if (rc != VINF_SUCCESS)
    496496            {
     
    15051505    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    15061506
    1507     rc = PGMVerifyAccess(pVM, pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
     1507    rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
    15081508                         X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    15091509    if (rc != VINF_SUCCESS)
     
    16661666    /* Access verification first; we currently can't recover properly from traps inside this instruction */
    16671667    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    1668     rc = PGMVerifyAccess(pVM, pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
     1668    rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
    16691669                         (cpl == 3) ? X86_PTE_US : 0);
    16701670    if (rc != VINF_SUCCESS)
     
    18201820    uint64_t fFlags;
    18211821    RTHCPHYS HCPhys;
    1822     rc = PGMShwGetPage(pVM, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
     1822    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    18231823    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    18241824# endif
    18251825#endif
    1826     rc = PGMPrefetchPage(pVM, pVCpu, (RTGCPTR)GCPhys);
     1826    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    18271827    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    18281828    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r18988 r18992  
    7171*   Internal Functions                                                         *
    7272*******************************************************************************/
    73 DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
    74 DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
     73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
    7574DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
    7675
     
    395394 *
    396395 * @returns VBox status code (appropriate for trap handling and GC return).
    397  * @param   pVM         VM Handle.
    398396 * @param   pVCpu       VMCPU handle.
    399397 * @param   uErr        The trap error code.
     
    401399 * @param   pvFault     The fault address.
    402400 */
    403 VMMDECL(int)     PGMTrap0eHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
     401VMMDECL(int)     PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
    404402{
    405403    LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
     
    451449     * Call the worker.
    452450     */
    453     int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVM, pVCpu, uErr, pRegFrame, pvFault);
     451    int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
    454452    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
    455453        rc = VINF_SUCCESS;
     
    472470 * @retval  VINF_SUCCESS on success.
    473471 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
    474  * @param   pVM         VM handle.
    475472 * @param   pVCpu       VMCPU handle.
    476473 * @param   GCPtrPage   Page to invalidate.
    477474 */
    478 VMMDECL(int) PGMPrefetchPage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage)
     475VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
    479476{
    480477    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    481     int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVM, pVCpu, GCPtrPage);
     478    int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
    482479    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    483480    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
     
    516513 *
    517514 * @returns VBox status code.
    518  * @param   pVM         VM handle.
    519515 * @param   pVCpu       VMCPU handle.
    520516 * @param   Addr        Guest virtual address to check
     
    523519 * @remarks Current not in use.
    524520 */
    525 VMMDECL(int) PGMIsValidAccess(PVM pVM, PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
     521VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
    526522{
    527523    /*
     
    559555    if (    RT_SUCCESS(rc)
    560556        &&  PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
    561         return PGMIsValidAccess(pVM, pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
     557        return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    562558    return rc;
    563559}
     
    570566 *
    571567 * @returns VBox status code.
    572  * @param   pVM         VM handle.
    573568 * @param   pVCpu       VMCPU handle.
    574569 * @param   Addr        Guest virtual address to check
     
    576571 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
    577572 */
    578 VMMDECL(int) PGMVerifyAccess(PVM pVM, PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
    579 {
     573VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
     574{
     575    PVM pVM = pVCpu->CTX_SUFF(pVM);
     576
    580577    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
    581578
     
    622619            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
    623620            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
    624             rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVM, pVCpu, Addr, fPageGst, uErr);
     621            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
    625622            if (rc != VINF_SUCCESS)
    626623                return rc;
     
    659656            else
    660657                cbSize = 1;
    661             rc = PGMVerifyAccess(pVM, pVCpu, Addr, 1, fAccess);
     658            rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
    662659            if (rc != VINF_SUCCESS)
    663660                break;
     
    678675 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
    679676 *
    680  * @param   pVM         VM handle.
    681677 * @param   pVCpu       VMCPU handle.
    682678 * @param   GCPtrPage   Page to invalidate.
     
    687683 * @todo    Flush page or page directory only if necessary!
    688684 */
    689 VMMDECL(int) PGMInvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage)
    690 {
     685VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
     686{
     687    PVM pVM = pVCpu->CTX_SUFF(pVM);
    691688    int rc;
    692689    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
     
    731728     */
    732729    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
    733     rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVM, pVCpu, GCPtrPage);
     730    rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
    734731    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
    735732
     
    843840}
    844841
    845 
    846 #if 0 /* obsolete */
    847 /**
    848  * Gets the SHADOW page directory pointer for the specified address.
    849  *
    850  * @returns VBox status.
    851  * @param   pVM         VM handle.
    852  * @param   pVCpu       VMCPU handle.
    853  * @param   GCPtr       The address.
    854  * @param   ppPdpt      Receives address of pdpt
    855  * @param   ppPD        Receives address of page directory
    856  * @remarks Unused.
    857  */
    858 DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
    859 {
    860     PPGM           pPGM   = &pVM->pgm.s;
    861     PPGMPOOL       pPool  = pPGM->CTX_SUFF(pPool);
    862     PPGMPOOLPAGE   pShwPage;
    863 
    864     Assert(!HWACCMIsNestedPagingActive(pVM));
    865 
    866     const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    867     PX86PDPT  pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    868     PX86PDPE  pPdpe = &pPdpt->a[iPdPt];
    869 
    870     *ppPdpt = pPdpt;
    871     if (!pPdpe->n.u1Present)
    872         return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    873 
    874     Assert(pPdpe->u & X86_PDPE_PG_MASK);
    875     pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    876     AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    877 
    878     *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    879     return VINF_SUCCESS;
    880 }
    881 #endif
    882 
    883842/**
    884843 * Gets the shadow page directory for the specified address, PAE.
    885844 *
    886845 * @returns Pointer to the shadow PD.
    887  * @param   pVM         VM handle.
    888846 * @param   pVCpu       The VMCPU handle.
    889847 * @param   GCPtr       The address.
     
    891849 * @param   ppPD        Receives address of page directory
    892850 */
    893 int pgmShwSyncPaePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
     851int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
    894852{
    895853    const unsigned iPdPt    = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    896854    PX86PDPT       pPdpt    = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
    897855    PX86PDPE       pPdpe    = &pPdpt->a[iPdPt];
     856    PVM            pVM      = pVCpu->CTX_SUFF(pVM);
    898857    PPGMPOOL       pPool    = pVM->pgm.s.CTX_SUFF(pPool);
    899858    PPGMPOOLPAGE   pShwPage;
     
    1018977 *
    1019978 * @returns VBox status.
    1020  * @param   pVM         VM handle.
    1021979 * @param   pVCpu       VMCPU handle.
    1022980 * @param   GCPtr       The address.
     
    1025983 * @param   ppPD        Receives address of page directory
    1026984 */
    1027 int pgmShwSyncLongModePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
     985int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
    1028986{
    1029987    PPGMCPU        pPGM          = &pVCpu->pgm.s;
     988    PVM            pVM           = pVCpu->CTX_SUFF(pVM);
    1030989    PPGMPOOL       pPool         = pVM->pgm.s.CTX_SUFF(pPool);
    1031990    const unsigned iPml4         = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    1032991    PX86PML4E      pPml4e        = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    1033992    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);
    1034     bool           fPaging      = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
     993    bool           fPaging       = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
    1035994    PPGMPOOLPAGE   pShwPage;
    1036995    int            rc;
     
    11231082 *
    11241083 * @returns VBox status.
    1125  * @param   pVM         VM handle.
    11261084 * @param   pVCpu       VMCPU handle.
    11271085 * @param   GCPtr       The address.
     
    11291087 * @param   ppPD        Receives address of page directory
    11301088 */
    1131 DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
     1089DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
    11321090{
    11331091    PPGMCPU         pPGM = &pVCpu->pgm.s;
     
    11431101        return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
    11441102
    1145     PPGMPOOL        pPool = pVM->pgm.s.CTX_SUFF(pPool);
     1103    PVM             pVM      = pVCpu->CTX_SUFF(pVM);
     1104    PPGMPOOL        pPool    = pVM->pgm.s.CTX_SUFF(pPool);
    11461105    PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    11471106    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
     
    11651124 *
    11661125 * @returns VBox status.
    1167  * @param   pVM         VM handle.
    11681126 * @param   pVCpu       VMCPU handle.
    11691127 * @param   GCPtr       The address.
     
    11711129 * @param   ppPD        Receives address of page directory
    11721130 */
    1173 int pgmShwGetEPTPDPtr(PVM pVM, PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
     1131int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
    11741132{
    11751133    PPGMCPU        pPGM  = &pVCpu->pgm.s;
     1134    PVM            pVM   = pVCpu->CTX_SUFF(pVM);
    11761135    const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
    11771136    PPGMPOOL       pPool = pVM->pgm.s.CTX_SUFF(pPool);
     
    16381597 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
    16391598 *          safely be ignored and overridden since the FF will be set too then.
    1640  * @param   pVM         VM handle.
    16411599 * @param   pVCpu       VMCPU handle.
    16421600 * @param   cr3         The new cr3.
    16431601 * @param   fGlobal     Indicates whether this is a global flush or not.
    16441602 */
    1645 VMMDECL(int) PGMFlushTLB(PVM pVM, PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
    1646 {
     1603VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
     1604{
     1605    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1606
    16471607    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
    16481608
     
    16801640        RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
    16811641        pVCpu->pgm.s.GCPhysCR3  = GCPhysCR3;
    1682         rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVM, pVCpu, GCPhysCR3);
     1642        rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    16831643        if (RT_LIKELY(rc == VINF_SUCCESS))
    16841644        {
     
    17381698 *          requires a CR3 sync. This can safely be ignored and overridden since
    17391699 *          the FF will be set too then.)
    1740  * @param   pVM         VM handle.
    17411700 * @param   pVCpu       VMCPU handle.
    17421701 * @param   cr3         The new cr3.
    17431702 */
    1744 VMMDECL(int) PGMUpdateCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3)
    1745 {
     1703VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
     1704{
     1705    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1706
    17461707    LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
    17471708
     
    17731734    {
    17741735        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    1775         rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVM, pVCpu, GCPhysCR3);
     1736        rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    17761737        AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
    17771738    }
     
    17881749 *
    17891750 * @returns VBox status code.
    1790  * @param   pVM         The virtual machine.
    17911751 * @param   pVCpu       VMCPU handle.
    17921752 * @param   cr0         Guest context CR0 register
     
    17951755 * @param   fGlobal     Including global page directories or not
    17961756 */
    1797 VMMDECL(int) PGMSyncCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
    1798 {
     1757VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
     1758{
     1759    PVM pVM = pVCpu->CTX_SUFF(pVM);
    17991760    int rc;
    18001761
     
    18591820        {
    18601821            pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
    1861             rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVM, pVCpu, GCPhysCR3);
     1822            rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
    18621823        }
    18631824#ifdef IN_RING3
     
    18791840     */
    18801841    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
    1881     rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVM, pVCpu, cr0, cr3, cr4, fGlobal);
     1842    rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
    18821843    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
    18831844    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
     
    19201881 * @retval  VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
    19211882 *
    1922  * @param   pVM         VM handle.
    19231883 * @param   pVCpu       VMCPU handle.
    19241884 * @param   cr0         The new cr0.
     
    19261886 * @param   efer        The new extended feature enable register.
    19271887 */
    1928 VMMDECL(int) PGMChangeMode(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
    1929 {
     1888VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
     1889{
     1890    PVM pVM = pVCpu->CTX_SUFF(pVM);
    19301891    PGMMODE enmGuestMode;
    19311892
     
    25372498{
    25382499    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
    2539     unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVM, pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
     2500    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
    25402501    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
    25412502    return cErrors;
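
Several of the converted bodies above open with "PVM pVM = pVCpu->CTX_SUFF(pVM);". That macro selects the context-specific member name, so the same source resolves to the ring-3, ring-0 or raw-mode back-pointer depending on how the translation unit is built. The following is a simplified reconstruction for illustration; the real definition lives in the VBox headers and differs in detail (e.g. the per-context pointer types).

    /* Simplified reconstruction of the context-suffix selection. */
    #if defined(IN_RC)
    # define CTX_SUFF(a_Name)   a_Name##RC    /* raw-mode (guest context) build */
    #elif defined(IN_RING0)
    # define CTX_SUFF(a_Name)   a_Name##R0    /* ring-0 (host kernel) build */
    #else
    # define CTX_SUFF(a_Name)   a_Name##R3    /* ring-3 (userland) build */
    #endif

    typedef struct VM VM, *PVM;
    typedef struct VMCPU
    {
        PVM pVMR3;    /* back-pointer valid in ring-3 */
        PVM pVMR0;    /* back-pointer valid in ring-0 */
        PVM pVMRC;    /* back-pointer valid in raw-mode context */
    } VMCPU, *PVMCPU;

    /* In a ring-3 build this expands to pVCpu->pVMR3, which is why the
     * converted PGM functions no longer need an explicit PVM parameter. */
    static inline PVM demoGetVM(PVMCPU pVCpu)
    {
        return pVCpu->CTX_SUFF(pVM);
    }
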
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r18988 r18992  
    2626*******************************************************************************/
    2727__BEGIN_DECLS
    28 PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
    29 PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
    30 PGM_BTH_DECL(int, SyncPage)(PVM pVM, PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
    31 PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
    32 PGM_BTH_DECL(int, SyncPT)(PVM pVM, PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
    33 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
    34 PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage);
    35 PGM_BTH_DECL(int, SyncCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
     28PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
     29PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
     30PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
     31PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
     32PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
     33PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
     34PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
     35PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
    3636#ifdef VBOX_STRICT
    37 PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
     37PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
    3838#endif
    3939#ifdef PGMPOOL_WITH_USER_TRACKING
    40 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
     40DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys);
    4141#endif
    42 PGM_BTH_DECL(int, MapCR3)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
    43 PGM_BTH_DECL(int, UnmapCR3)(PVM pVM, PVMCPU pVCpu);
     42PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
     43PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
    4444__END_DECLS
    4545
     
    7676 * @returns VBox status code (appropriate for trap handling and GC return).
    7777 *
    78  * @param   pVM         VM Handle.
    7978 * @param   pVCpu       VMCPU Handle.
    8079 * @param   uErr        The trap error code.
     
    8281 * @param   pvFault     The fault address.
    8382 */
    84 PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
     83PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
    8584{
     85    PVM pVM = pVCpu->CTX_SUFF(pVM);
     86
    8687# if defined(IN_RC) && defined(VBOX_STRICT)
    8788    PGMDynCheckLocks(pVM);
     
    164165    PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    165166#    endif
    166     rc = pgmShwSyncPaePDPtr(pVM, pVCpu, pvFault, &PdpeSrc, &pPDDst);
     167    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, &PdpeSrc, &pPDDst);
    167168    if (rc != VINF_SUCCESS)
    168169    {
     
    186187#   endif
    187188
    188     rc = pgmShwSyncLongModePDPtr(pVM, pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
     189    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, pPml4eSrc, &PdpeSrc, &pPDDst);
    189190    if (rc != VINF_SUCCESS)
    190191    {
     
    198199    PEPTPD          pPDDst;
    199200
    200     rc = pgmShwGetEPTPDPtr(pVM, pVCpu, pvFault, NULL, &pPDDst);
     201    rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
    201202    if (rc != VINF_SUCCESS)
    202203    {
     
    213214     */
    214215    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    215     rc = PGM_BTH_NAME(CheckPageFault)(pVM, pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
     216    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
    216217    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
    217218    if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
     
    257258        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeSyncPT, f);
    258259        LogFlow(("=>SyncPT %04x = %08x\n", iPDSrc, PdeSrc.au32[0]));
    259         rc = PGM_BTH_NAME(SyncPT)(pVM, pVCpu, iPDSrc, pPDSrc, pvFault);
     260        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, pvFault);
    260261        if (RT_SUCCESS(rc))
    261262        {
     
    411412                                && !(uErr & X86_TRAP_PF_P))
    412413                            {
    413                                 rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
     414                                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
    414415                                if (    RT_FAILURE(rc)
    415416                                    || !(uErr & X86_TRAP_PF_RW)
     
    457458                            && !(uErr & X86_TRAP_PF_P))
    458459                        {
    459                             rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
     460                            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
    460461                            if (    RT_FAILURE(rc)
    461462                                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
     
    551552                        &&  !(uErr & X86_TRAP_PF_P))
    552553                    {
    553                         rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
     554                        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
    554555                        if (    RT_FAILURE(rc)
    555556                            ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
     
    685686                                 */
    686687                                LogFlow(("CSAM ring 3 job\n"));
    687                                 int rc2 = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, 1, uErr);
     688                                int rc2 = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
    688689                                AssertRC(rc2);
    689690
     
    733734                }
    734735#   endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(IN_RING0) */
    735                 rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
     736                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, PGM_SYNC_NR_PAGES, uErr);
    736737                if (RT_SUCCESS(rc))
    737738                {
     
    778779                     *       page is not present, which is not true in this case.
    779780                     */
    780                     rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, pvFault, 1, uErr);
     781                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, pvFault, 1, uErr);
    781782                    if (RT_SUCCESS(rc))
    782783                    {
     
    904905 * @returns VBox status code.
    905906 *
    906  * @param   pVM         VM handle.
    907907 * @param   pVCpu       The VMCPU handle.
    908908 * @param   GCPtrPage   Page to invalidate.
     
    915915 * @todo    Add a #define for simply invalidating the page.
    916916 */
    917 PGM_BTH_DECL(int, InvalidatePage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage)
     917PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage)
    918918{
    919919#if    PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)   \
    920920    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
    921921    && PGM_SHW_TYPE != PGM_TYPE_EPT
    922     int             rc;
     922    int rc;
     923    PVM pVM = pVCpu->CTX_SUFF(pVM);
    923924
    924925    LogFlow(("InvalidatePage %RGv\n", GCPtrPage));
     
    967968    PX86PDPT        pPdptDst;
    968969    PX86PML4E       pPml4eDst;
    969     rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
     970    rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eDst, &pPdptDst, &pPDDst);
    970971    if (rc != VINF_SUCCESS)
    971972    {
     
    11621163            Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    11631164            Assert(PGMGetGuestMode(pVCpu) <= PGMMODE_PAE);
    1164             rc = PGM_BTH_NAME(SyncPT)(pVM, pVCpu, iPDSrc, pPDSrc, GCPtrPage);
     1165            rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    11651166        }
    11661167        else if (   PdeSrc.n.u1User != PdeDst.n.u1User
     
    12091210#  ifdef PGMPOOL_WITH_USER_TRACKING
    12101211                    /* This is very unlikely with caching/monitoring enabled. */
    1211                     PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
     1212                    PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
    12121213#  endif
    12131214                    pPT->a[iPTEDst].u = 0;
    12141215                }
    12151216# else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
    1216                 rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, GCPtrPage, 1, 0);
     1217                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
    12171218                if (RT_SUCCESS(rc))
    12181219                    rc = VINF_SUCCESS;
     
    13151316 * Update the tracking of shadowed pages.
    13161317 *
    1317  * @param   pVM         The VM handle.
    13181318 * @param   pVCpu       The VMCPU handle.
    13191319 * @param   pShwPage    The shadow page.
    13201320 * @param   HCPhys      The physical page that is being dereferenced.
    13211321 */
    1322 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
     1322DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys)
    13231323{
    13241324# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
     1325    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1326
    13251327    STAM_PROFILE_START(&pVM->pgm.s.StatTrackDeref, a);
    13261328    LogFlow(("SyncPageWorkerTrackDeref: Damn HCPhys=%RHp pShwPage->idx=%#x!!!\n", HCPhys, pShwPage->idx));
     
    13641366 * Update the tracking of shadowed pages.
    13651367 *
    1366  * @param   pVM         The VM handle.
    13671368 * @param   pVCpu       The VMCPU handle.
    13681369 * @param   pShwPage    The shadow page.
     
    13711372 * @param   iPTDst      The index into the shadow table.
    13721373 */
    1373 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
     1374DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackAddref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, uint16_t u16, PPGMPAGE pPage, const unsigned iPTDst)
    13741375{
     1376    PVM pVM = pVCpu->CTX_SUFF(pVM);
    13751377# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    13761378    /*
     
    14071409 * will be mapped in this function.
    14081410 *
    1409  * @param   pVM         VM handle.
    14101411 * @param   pVCpu       The VMCPU handle.
    14111412 * @param   pPteDst     Destination page table entry.
     
    14181419 * @remark  Not used for 2/4MB pages!
    14191420 */
    1420 DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVM pVM, PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
     1421DECLINLINE(void) PGM_BTH_NAME(SyncPageWorker)(PVMCPU pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst)
    14211422{
    14221423    if (PteSrc.n.u1Present)
    14231424    {
     1425        PVM pVM = pVCpu->CTX_SUFF(pVM);
     1426
    14241427        /*
    14251428         * Find the ram range.
     
    15311534            {
    15321535                if (!pPteDst->n.u1Present)
    1533                     PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
     1536                    PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
    15341537                else if ((pPteDst->u & SHW_PTE_PG_MASK) != (PteDst.u & SHW_PTE_PG_MASK))
    15351538                {
    15361539                    Log2(("SyncPageWorker: deref! *pPteDst=%RX64 PteDst=%RX64\n", (uint64_t)pPteDst->u, (uint64_t)PteDst.u));
    1537                     PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
    1538                     PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
     1540                    PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
     1541                    PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
    15391542                }
    15401543            }
     
    15421545            {
    15431546                Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
    1544                 PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
     1547                PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
    15451548            }
    15461549#endif /* PGMPOOL_WITH_USER_TRACKING */
     
    15691572        {
    15701573            Log2(("SyncPageWorker: deref! *pPteDst=%RX64\n", (uint64_t)pPteDst->u));
    1571             PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
     1574            PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVCpu, pShwPage, pPteDst->u & SHW_PTE_PG_MASK);
    15721575        }
    15731576#endif /* PGMPOOL_WITH_USER_TRACKING */
     
    15861589 * @returns VBox status code.
    15871590 * @returns VINF_PGM_SYNCPAGE_MODIFIED_PDE if it modifies the PDE in any way.
    1588  * @param   pVM         VM handle.
    15891591 * @param   pVCpu       The VMCPU handle.
    15901592 * @param   PdeSrc      Page directory entry of the guest.
     
    15931595 * @param   uErr        Fault error (X86_TRAP_PF_*).
    15941596 */
    1595 PGM_BTH_DECL(int, SyncPage)(PVM pVM, PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
     1597PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
    15961598{
     1599    PVM pVM = pVCpu->CTX_SUFF(pVM);
    15971600    LogFlow(("SyncPage: GCPtrPage=%RGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
    15981601
     
    16441647    PX86PDPT        pPdptDst;
    16451648
    1646     int rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     1649    int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
    16471650    AssertRCSuccessReturn(rc, rc);
    16481651    Assert(pPDDst && pPdptDst);
     
    17621765                                   )
    17631766#endif /* else: CSAM not active */
    1764                                     PGM_BTH_NAME(SyncPageWorker)(pVM, pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
     1767                                    PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    17651768                                Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
    17661769                                      GCPtrCurPage, PteSrc.n.u1Present,
     
    17791782                        GSTPTE PteSrc = pPTSrc->a[iPTSrc];
    17801783                        const unsigned iPTDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
    1781                         PGM_BTH_NAME(SyncPageWorker)(pVM, pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
     1784                        PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    17821785                        Log2(("SyncPage: 4K  %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s\n",
    17831786                              GCPtrPage, PteSrc.n.u1Present,
     
    18341837# ifdef PGMPOOL_WITH_USER_TRACKING
    18351838                    if (PteDst.n.u1Present && !pPTDst->a[iPTDst].n.u1Present)
    1836                         PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
     1839                        PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
    18371840# endif
    18381841                    /* Make sure only allocated pages are mapped writable. */
     
    19311934    PX86PDPT        pPdptDst;
    19321935
    1933     int rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     1936    int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
    19341937    AssertRCSuccessReturn(rc, rc);
    19351938    Assert(pPDDst && pPdptDst);
     
    19401943    EPTPDE          PdeDst;
    19411944
    1942     int rc = pgmShwGetEPTPDPtr(pVM, pVCpu, GCPtrPage, NULL, &pPDDst);
     1945    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtrPage, NULL, &pPDDst);
    19431946    if (rc != VINF_SUCCESS)
    19441947    {
     
    19871990                PteSrc.n.u1User     = 1;
    19881991
    1989                 PGM_BTH_NAME(SyncPageWorker)(pVM, pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
     1992                PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    19901993
    19911994                Log2(("SyncPage: 4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx} PteDst=%08llx%s\n",
     
    20182021        PteSrc.n.u1Write    = 1;
    20192022        PteSrc.n.u1User     = 1;
    2020         PGM_BTH_NAME(SyncPageWorker)(pVM, pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
     2023        PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    20212024
    20222025        Log2(("SyncPage: 4K  %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}PteDst=%08llx%s\n",
     
    20432046 *
    20442047 * @returns VBox status code.
    2045  * @param   pVM         VM handle.
    20462048 * @param   pVCpu       The VMCPU handle.
    20472049 * @param   uErr        Page fault error code.
     
    20502052 * @param   GCPtrPage   Guest context page address.
    20512053 */
    2052 PGM_BTH_DECL(int, CheckPageFault)(PVM pVM, PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
     2054PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
    20532055{
    20542056    bool fWriteProtect      = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP);
     
    20652067    unsigned uPageFaultLevel;
    20662068    int rc;
     2069    PVM pVM = pVCpu->CTX_SUFF(pVM);
    20672070
    20682071    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
     
    23562359 *
    23572360 * @returns VBox status code.
    2358  * @param   pVM         VM handle.
    23592361 * @param   pVCpu       The VMCPU handle.
    23602362 * @param   iPD         Page directory index.
     
    23632365 * @param   GCPtrPage   GC Pointer of the page that caused the fault
    23642366 */
    2365 PGM_BTH_DECL(int, SyncPT)(PVM pVM, PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
     2367PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
    23662368{
     2369    PVM pVM = pVCpu->CTX_SUFF(pVM);
     2370
    23672371    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
    23682372    STAM_COUNTER_INC(&pVCpu->pgm.s.StatSyncPtPD[iPDSrc]);
     
    24082412    PX86PDPAE       pPDDst;
    24092413    PX86PDPT        pPdptDst;
    2410     rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     2414    rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
    24112415    AssertRCSuccessReturn(rc, rc);
    24122416    Assert(pPDDst);
     
    26172621                           )
    26182622# endif
    2619                             PGM_BTH_NAME(SyncPageWorker)(pVM, pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
     2623                            PGM_BTH_NAME(SyncPageWorker)(pVCpu, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    26202624                        Log2(("SyncPT:   4K+ %RGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%RGp\n",
    26212625                              (RTGCPTR)((iPDSrc << GST_PD_SHIFT) | (iPTSrc << PAGE_SHIFT)),
     
    27472751# ifdef PGMPOOL_WITH_USER_TRACKING
    27482752                        if (PteDst.n.u1Present)
    2749                             PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVM, pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
     2753                            PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPage, PGM_PAGE_GET_TRACKING(pPage), pPage, iPTDst);
    27502754# endif
    27512755                        /* commit it */
     
    28262830    PX86PDPAE       pPDDst;
    28272831    PX86PDPT        pPdptDst;
    2828     rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
     2832    rc = pgmShwGetLongModePDPtr(pVCpu, GCPtrPage, NULL, &pPdptDst, &pPDDst);
    28292833    AssertRCSuccessReturn(rc, rc);
    28302834    Assert(pPDDst);
     
    28412845    PEPTPDPT        pPdptDst;
    28422846
    2843     rc = pgmShwGetEPTPDPtr(pVM, pVCpu, GCPtrPage, &pPdptDst, &pPDDst);
     2847    rc = pgmShwGetEPTPDPtr(pVCpu, GCPtrPage, &pPdptDst, &pPDDst);
    28442848    if (rc != VINF_SUCCESS)
    28452849    {
     
    28952899    *pPdeDst = PdeDst;
    28962900
    2897     rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
     2901    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, PGM_SYNC_NR_PAGES, 0 /* page not present */);
    28982902    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
    28992903    return rc;
     
    29152919 *
    29162920 * @returns VBox status code.
    2917  * @param   pVM         VM handle.
    29182921 * @param   pVCpu       The VMCPU handle.
    29192922 * @param   GCPtrPage   Page to prefetch.
    29202923 */
    2921 PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage)
     2924PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage)
    29222925{
    29232926#if   (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     
    29732976        PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    29742977#   endif
    2975         int rc = pgmShwSyncPaePDPtr(pVM, pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
     2978        int rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
    29762979        if (rc != VINF_SUCCESS)
    29772980        {
     
    29983001#  endif
    29993002
    3000         int rc = pgmShwSyncLongModePDPtr(pVM, pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     3003        int rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
    30013004        if (rc != VINF_SUCCESS)
    30023005        {
     
    30113014            if (!PdeDst.n.u1Present)
    30123015                /** r=bird: This guy will set the A bit on the PDE, probably harmless. */
    3013                 rc = PGM_BTH_NAME(SyncPT)(pVM, pVCpu, iPDSrc, pPDSrc, GCPtrPage);
     3016                rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    30143017            else
    30153018            {
     
    30183021                 *        makes no sense to prefetch more than one page.
    30193022                 */
    3020                 rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, GCPtrPage, 1, 0);
     3023                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
    30213024                if (RT_SUCCESS(rc))
    30223025                    rc = VINF_SUCCESS;
     
    30383041 *
    30393042 * @returns VBox status code (informational included).
    3040  * @param   pVM         VM handle.
    30413043 * @param   pVCpu       The VMCPU handle.
    30423044 * @param   GCPtrPage   The address of the page to sync.
     
    30443046 * @param   uErr        The trap error code.
    30453047 */
    3046 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
     3048PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
    30473049{
     3050    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3051
    30483052    LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
    30493053
     
    31133117    PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    31143118#   endif
    3115     rc = pgmShwSyncPaePDPtr(pVM, pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
     3119    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, &PdpeSrc, &pPDDst);
    31163120    if (rc != VINF_SUCCESS)
    31173121    {
     
    31383142#  endif
    31393143
    3140     rc = pgmShwSyncLongModePDPtr(pVM, pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
     3144    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc, &PdpeSrc, &pPDDst);
    31413145    if (rc != VINF_SUCCESS)
    31423146    {
     
    31543158        PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
    31553159# endif
    3156         rc = PGM_BTH_NAME(SyncPT)(pVM, pVCpu, iPDSrc, pPDSrc, GCPtrPage);
     3160        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    31573161# if defined(IN_RC)
    31583162        /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
     
    31663170# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    31673171    /* Check for dirty bit fault */
    3168     rc = PGM_BTH_NAME(CheckPageFault)(pVM, pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
     3172    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
    31693173    if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
    31703174        Log(("PGMVerifyAccess: success (dirty)\n"));
     
    31883192            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
    31893193
    3190         rc = PGM_BTH_NAME(SyncPage)(pVM, pVCpu, PdeSrc, GCPtrPage, 1, 0);
     3194        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
    31913195        if (RT_SUCCESS(rc))
    31923196        {
     
    32623266 *
    32633267 * @returns VBox status code, no specials.
    3264  * @param   pVM         VM handle.
    32653268 * @param   pVCpu       The VMCPU handle.
    32663269 * @param   cr0         Guest context CR0 register
     
    32693272 * @param   fGlobal     Including global page directories or not
    32703273 */
    3271 PGM_BTH_DECL(int, SyncCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
     3274PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
    32723275{
     3276    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3277
    32733278    if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
    32743279        fGlobal = true; /* Change this CR3 reload to be a global one. */
     
    33433348 *
    33443349 * @returns VBox status code (VINF_SUCCESS).
    3345  * @param   pVM         The VM handle.
    33463350 * @param   cr3         The root of the hierarchy.
    33473351 * @param   cr4         The cr4, only PAE and PSE are currently used.
     
    33673371 * @param   cb          How much to check. Defaults to everything.
    33683372 */
    3369 PGM_BTH_DECL(unsigned, AssertCR3)(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
     3373PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb)
    33703374{
    33713375#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    33723376    return 0;
    33733377#else
    3374     unsigned    cErrors = 0;
     3378    unsigned cErrors = 0;
     3379    PVM      pVM     = pVCpu->CTX_SUFF(pVM);
    33753380
    33763381#if PGM_GST_TYPE == PGM_TYPE_PAE
     
    35223527            PGSTPD          pPDSrc    = pgmGstGetLongModePDPtr(&pVCpu->pgm.s, GCPtr, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    35233528
    3524             rc = pgmShwGetLongModePDPtr(pVM, pVCpu, GCPtr, NULL, &pPdptDst, &pPDDst);
     3529            rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdptDst, &pPDDst);
    35253530            if (rc != VINF_SUCCESS)
    35263531            {
     
    41194124 * @retval  VINF_SUCCESS.
    41204125 *
    4121  * @param   pVM             VM handle.
    41224126 * @param   pVCpu       The VMCPU handle.
    41234127 * @param   GCPhysCR3       The physical address in the CR3 register.
    41244128 */
    4125 PGM_BTH_DECL(int, MapCR3)(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
     4129PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3)
    41264130{
     4131    PVM pVM = pVCpu->CTX_SUFF(pVM);
     4132
    41274133    /* Update guest paging info. */
    41284134#if PGM_GST_TYPE == PGM_TYPE_32BIT \
     
    43304336 *
    43314337 * @returns VBox status, no specials.
    4332  * @param   pVM         VM handle.
    43334338 * @param   pVCpu       The VMCPU handle.
    43344339 */
    4335 PGM_BTH_DECL(int, UnmapCR3)(PVM pVM, PVMCPU pVCpu)
     4340PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu)
    43364341{
    43374342    LogFlow(("UnmapCR3\n"));
    43384343
    4339     int rc = VINF_SUCCESS;
     4344    int rc  = VINF_SUCCESS;
     4345    PVM pVM = pVCpu->CTX_SUFF(pVM);
    43404346
    43414347    /*
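
The hunks above all apply the same refactoring: the PGM "both-mode" template functions (Trap0eHandler, InvalidatePage, SyncPage, SyncPT, CheckPageFault, PrefetchPage, VerifyAccessSyncPage, SyncCR3, AssertCR3, MapCR3, UnmapCR3) drop their explicit PVM parameter, and each body now derives the VM handle from the per-CPU handle via PVM pVM = pVCpu->CTX_SUFF(pVM). Below is a minimal standalone C sketch of that calling-convention change; the types and the SyncPage name are toy stand-ins, not the VirtualBox definitions.

    #include <stdio.h>

    /* Toy stand-ins for the VM and per-CPU structures; illustrative only. */
    typedef struct VM    { int cCpus; }           VM, *PVM;
    typedef struct VMCPU { PVM pVM; int idCpu; }  VMCPU, *PVMCPU;

    /* Old shape:  int SyncPage(PVM pVM, PVMCPU pVCpu, unsigned cPages);
     * New shape:  only the per-CPU handle is passed; the VM handle is looked up
     *             inside (the role pVCpu->CTX_SUFF(pVM) plays in the diff). */
    static int SyncPage(PVMCPU pVCpu, unsigned cPages)
    {
        PVM pVM = pVCpu->pVM;
        printf("cpu %d of %d: syncing %u page(s)\n", pVCpu->idCpu, pVM->cCpus, cPages);
        return 0;
    }

    int main(void)
    {
        VM    vm   = { 2 };
        VMCPU cpu0 = { &vm, 0 };
        return SyncPage(&cpu0, 1);
    }

One plausible motivation for the change is that every call site shrinks, and it becomes impossible to hand a function a pVCpu that does not belong to the pVM passed alongside it.
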
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r18927 r18992  
    299299                            GstPdpe.u = X86_PDPE_P;
    300300                    }
    301                     int rc = pgmShwSyncPaePDPtr(pVM, pVCpu, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
     301                    int rc = pgmShwSyncPaePDPtr(pVCpu, iPdPt << X86_PDPT_SHIFT, &GstPdpe, &pShwPaePd);
    302302                    AssertFatalRC(rc);
    303303                }
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r18988 r18992  
    38203820
    38213821    /* Unmap the old CR3 value before flushing everything. */
    3822     int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVM, pVCpu);
     3822    int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVCpu);
    38233823    AssertRC(rc);
    38243824
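
The pool change above calls the active paging mode's UnmapCR3 through PGM_BTH_PFN, i.e. through a function pointer resolved per VCPU, and that pointer type now also takes only the VCPU. Below is a toy sketch of dispatching through such a per-CPU pointer; the names (PFNUNMAPCR3, pfnUnmapCR3, the mode handlers) are made up for illustration and are not the VirtualBox macros or tables.

    #include <stdio.h>

    typedef struct VMCPU VMCPU, *PVMCPU;
    typedef int (*PFNUNMAPCR3)(PVMCPU pVCpu);

    /* Toy per-CPU structure carrying the mode-specific handler pointer,
     * loosely mirroring how a PGM_BTH_PFN() lookup selects the function
     * matching the current shadow/guest paging mode pair. */
    struct VMCPU
    {
        int         idCpu;
        PFNUNMAPCR3 pfnUnmapCR3;   /* re-set whenever the paging mode changes */
    };

    static int unmapCr3_32Bit(PVMCPU pVCpu) { printf("cpu %d: 32-bit UnmapCR3\n", pVCpu->idCpu); return 0; }
    static int unmapCr3_Pae  (PVMCPU pVCpu) { printf("cpu %d: PAE UnmapCR3\n",    pVCpu->idCpu); return 0; }

    int main(void)
    {
        VMCPU cpu0 = { 0, unmapCr3_Pae   };
        VMCPU cpu1 = { 1, unmapCr3_32Bit };
        /* Caller side after the change: only the VCPU handle is passed,
         * analogous to  int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVCpu);  */
        int rc = cpu0.pfnUnmapCR3(&cpu0);
        if (rc == 0)
            rc = cpu1.pfnUnmapCR3(&cpu1);
        return rc;
    }
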
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r18988 r18992  
    191191    EPTPDE          Pde;
    192192
    193     int rc = pgmShwGetEPTPDPtr(pVM, pVCpu, GCPtr, NULL, &pPDDst);
     193    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    194194    if (rc != VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    195195    {
     
    328328        EPTPDE          Pde;
    329329
    330         rc = pgmShwGetEPTPDPtr(pVM, pVCpu, GCPtr, NULL, &pPDDst);
     330        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    331331        if (rc != VINF_SUCCESS)
    332332        {
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    r18927 r18992  
    10241024                /** @todo might cross page boundary */
    10251025                fTriedAlready = true;
    1026                 rc = PGMPrefetchPage(pVM, pVCpu, (RTGCPTR)GCPtrTss);
     1026                rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPtrTss);
    10271027                if (rc != VINF_SUCCESS)
    10281028                    return rc;
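
The SELMAll.cpp hunk above shows the prefetch-and-retry idiom: when reading the guest TSS fails because the backing page is not mapped yet, the code calls PGMPrefetchPage (now taking only the VCPU handle) and retries exactly once, guarded by fTriedAlready. A small self-contained sketch of that idiom follows; the helper names and the error code are invented for the example and do not correspond to the VirtualBox API.

    #include <stdio.h>
    #include <stdbool.h>

    #define ERR_PAGE_NOT_PRESENT  (-1)

    static bool g_fPagePresent = false;   /* pretend state of the guest mapping */

    static int readGuestWord(unsigned long addr, unsigned *pValue)
    {
        (void)addr;
        if (!g_fPagePresent)
            return ERR_PAGE_NOT_PRESENT;
        *pValue = 0x1234;                 /* pretend we read guest memory */
        return 0;
    }

    static int prefetchPage(unsigned long addr)
    {
        (void)addr;
        g_fPagePresent = true;            /* pretend the page got mapped in */
        return 0;
    }

    int main(void)
    {
        unsigned long addr          = 0x1000;
        unsigned      val           = 0;
        bool          fTriedAlready = false;

        int rc = readGuestWord(addr, &val);
        if (rc == ERR_PAGE_NOT_PRESENT && !fTriedAlready)
        {
            fTriedAlready = true;         /* never retry more than once */
            rc = prefetchPage(addr);
            if (rc == 0)
                rc = readGuestWord(addr, &val);
        }
        printf("rc=%d val=%#x\n", rc, val);
        return rc;
    }
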
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r18927 r18992  
    458458            /* The page might be out of sync. */ /** @todo might cross a page boundary) */
    459459            Log(("Page %RGv out of sync -> prefetch and try again\n", pIDTEntry));
    460             rc = PGMPrefetchPage(pVM, pVCpu, pIDTEntry); /** @todo r=bird: rainy day: this isn't entirely safe because of access bit virtualization and CSAM. */
     460            rc = PGMPrefetchPage(pVCpu, pIDTEntry); /** @todo r=bird: rainy day: this isn't entirely safe because of access bit virtualization and CSAM. */
    461461            if (rc != VINF_SUCCESS)
    462462            {
     
    518518                    /* The page might be out of sync. */ /** @todo might cross a page boundary) */
    519519                    Log(("Page %RGv out of sync -> prefetch and try again\n", pGdtEntry));
    520                     rc = PGMPrefetchPage(pVM, pVCpu, pGdtEntry);  /** @todo r=bird: rainy day: this isn't entirely safe because of access bit virtualization and CSAM. */
     520                    rc = PGMPrefetchPage(pVCpu, pGdtEntry);  /** @todo r=bird: rainy day: this isn't entirely safe because of access bit virtualization and CSAM. */
    521521                    if (rc != VINF_SUCCESS)
    522522                    {
     
    588588                Assert(eflags.Bits.u1VM || (pRegFrame->ss & X86_SEL_RPL) != 0);
    589589                /* Check maximum amount we need (10 when executing in V86 mode) */
    590                 rc = PGMVerifyAccess(pVM, pVCpu, (RTGCUINTPTR)pTrapStackGC - 10*sizeof(uint32_t), 10 * sizeof(uint32_t), X86_PTE_RW);
     590                rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)pTrapStackGC - 10*sizeof(uint32_t), 10 * sizeof(uint32_t), X86_PTE_RW);
    591591                pTrapStack = (uint32_t *)pTrapStackGC;
    592592#else
  • trunk/src/VBox/VMM/VMMGC/SELMGC.cpp

    r18927 r18992  
    276276
    277277    /** @todo use different fallback?    */
    278     rc = PGMPrefetchPage(pVM, pVCpu, (uintptr_t)pvSrc);
     278    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    279279    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    280280    if (rc == VINF_SUCCESS)
  • trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp

    r18927 r18992  
    229229        else if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    230230#if 1
    231             rc = PGMSyncCR3(pVM, pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     231            rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    232232#else
    233233            rc = VINF_PGM_SYNC_CR3;
     
    990990     * This is all PGM stuff.
    991991     */
    992     int rc = PGMTrap0eHandler(pVM, pVCpu, pTrpm->uActiveErrorCode, pRegFrame, (RTGCPTR)pTrpm->uActiveCR2);
     992    int rc = PGMTrap0eHandler(pVCpu, pTrpm->uActiveErrorCode, pRegFrame, (RTGCPTR)pTrpm->uActiveCR2);
    993993    switch (rc)
    994994    {
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r18927 r18992  
    12361236    {
    12371237        CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3);
    1238         PGMUpdateCR3(pVM, pVCpu, pVMCB->guest.u64CR3);
     1238        PGMUpdateCR3(pVCpu, pVMCB->guest.u64CR3);
    12391239    }
    12401240
     
    14131413
    14141414            /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
    1415             rc = PGMTrap0eHandler(pVM, pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
     1415            rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
    14161416            Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
    14171417            if (rc == VINF_SUCCESS)
     
    17211721            &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    17221722        {
    1723             rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     1723            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    17241724            AssertRC(rc);
    17251725
     
    21722172
    21732173
    2174 static int svmR0InterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
     2174static int svmR0InterpretInvlPg(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
    21752175{
    21762176    OP_PARAMVAL param1;
     
    21972197     * (in absence of segment override prefixes)????
    21982198     */
    2199     rc = PGMInvalidatePage(pVM, pVCpu, addr);
     2199    rc = PGMInvalidatePage(pVCpu, addr);
    22002200    if (RT_SUCCESS(rc))
    22012201    {
     
    22442244            {
    22452245                Assert(cbOp == Cpu.opsize);
    2246                 rc = svmR0InterpretInvlPg(pVM, pVCpu, &Cpu, pRegFrame, uASID);
     2246                rc = svmR0InterpretInvlPg(pVCpu, &Cpu, pRegFrame, uASID);
    22472247                if (RT_SUCCESS(rc))
    22482248                {
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r18984 r18992  
    17241724        {
    17251725            CPUMSetGuestCR3(pVCpu, val);
    1726             PGMUpdateCR3(pVM, pVCpu, val);
     1726            PGMUpdateCR3(pVCpu, val);
    17271727        }
    17281728        /* Prefetch the four PDPT entries in PAE mode. */
     
    23892389
    23902390                /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
    2391                 rc = PGMTrap0eHandler(pVM, pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
     2391                rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
    23922392                Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
    23932393                if (rc == VINF_SUCCESS)
     
    30303030                &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    30313031            {
    3032                 rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     3032                rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    30333033                AssertRC(rc);
    30343034            }
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r18927 r18992  
    234234    {
    235235    case PGMMODE_32_BIT:
    236         rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
     236        rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
    237237        break;
    238238    case PGMMODE_PAE:
    239239    case PGMMODE_PAE_NX:
    240         rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
     240        rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
    241241        break;
    242242    case PGMMODE_AMD64:
    243243    case PGMMODE_AMD64_NX:
    244         rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
     244        rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
    245245        break;
    246246    case PGMMODE_EPT:
    247         rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
     247        rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, pvFault);
    248248        break;
    249249    default:
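
PGMR0.cpp above forwards a ring-0 #PF to the Trap0eHandler instantiation that matches the active paging mode, and after this changeset only the VCPU handle is passed down. A toy sketch of the same switch-on-mode dispatch follows; the enum values and handler names are illustrative and are not the PGM_BTH_NAME_* template instantiations.

    #include <stdio.h>

    typedef enum PGMODE { MODE_32BIT, MODE_PAE, MODE_AMD64, MODE_EPT } PGMODE;

    static int trap0e32Bit(unsigned uErr) { printf("32-bit #PF handler, err=%#x\n", uErr); return 0; }
    static int trap0ePae  (unsigned uErr) { printf("PAE #PF handler, err=%#x\n",    uErr); return 0; }
    static int trap0eAmd64(unsigned uErr) { printf("AMD64 #PF handler, err=%#x\n",  uErr); return 0; }
    static int trap0eEpt  (unsigned uErr) { printf("EPT #PF handler, err=%#x\n",    uErr); return 0; }

    /* Pick the handler built for the current mode, as the real code does with
     * its per-mode template instantiations. */
    static int dispatchTrap0e(PGMODE enmMode, unsigned uErr)
    {
        switch (enmMode)
        {
            case MODE_32BIT: return trap0e32Bit(uErr);
            case MODE_PAE:   return trap0ePae(uErr);
            case MODE_AMD64: return trap0eAmd64(uErr);
            case MODE_EPT:   return trap0eEpt(uErr);
            default:         return -1;
        }
    }

    int main(void)
    {
        return dispatchTrap0e(MODE_EPT, 0x0b);
    }
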
  • trunk/src/VBox/VMM/VMMR0/PGMR0Bth.h

    r18927 r18992  
    2525*******************************************************************************/
    2626__BEGIN_DECLS
    27 PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
     27PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
    2828__END_DECLS
    2929
  • trunk/src/VBox/VMM/VMMTests.cpp

    r18927 r18992  
    501501    pHyperCtx->cr0 = X86_CR0_PE | X86_CR0_WP | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
    502502    pHyperCtx->cr4 = X86_CR4_PGE | X86_CR4_OSFSXR | X86_CR4_OSXMMEEXCPT;
    503     PGMChangeMode(pVM, pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
    504     PGMSyncCR3(pVM, pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
     503    PGMChangeMode(pVCpu, pHyperCtx->cr0, pHyperCtx->cr4, pHyperCtx->msrEFER);
     504    PGMSyncCR3(pVCpu, pHyperCtx->cr0, CR3Phys, pHyperCtx->cr4, true);
    505505
    506506    VM_FF_CLEAR(pVM, VM_FF_TO_R3);