VirtualBox

Changeset 56013 in vbox for trunk/src


Timestamp:    May 21, 2015 5:04:14 PM (10 years ago)
Author:       vboxsync
svn:sync-xref-src-repo-rev: 100526
Message:

PGM: Made the virtual handler callbacks return VBOXSTRICTRC and prepared for RC execution.

Location:     trunk/src/VBox/VMM
Files:        17 edited

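The hunks below apply one mechanical pattern across the 17 files: the virtual access handler callbacks are re-declared to return VBOXSTRICTRC instead of int, and call sites that still have to hand back a plain int convert at the boundary with VBOXSTRICTRC_TODO(). A minimal sketch of that pattern follows, assuming the VBox/VMM headers; the handler name someVirtWriteHandler and the wrapper illustrativeDispatch are made-up placeholders, while PGM_ALL_CB2_DECL, VBOXSTRICTRC, VBOXSTRICTRC_TODO and the parameter list are taken directly from the diff.

    /* Handler declarations change from PGM_ALL_CB2_DECL(int) to
       PGM_ALL_CB2_DECL(VBOXSTRICTRC), so the same handler body can signal
       strict scheduling statuses when compiled for raw-mode context (RC). */
    PGM_ALL_CB2_DECL(VBOXSTRICTRC)
    someVirtWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser); /* placeholder name */

    /* Callers that must still return a plain int (e.g. the #PF dispatch in
       PGMAllBth.h below) capture the strict status and convert it at the
       boundary with VBOXSTRICTRC_TODO(): */
    static int illustrativeDispatch(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                                    PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    {
        VBOXSTRICTRC rcStrict = someVirtWriteHandler(pVM, pVCpu, GCPtr, pvPtr, pvBuf, cbBuf,
                                                     enmAccessType, enmOrigin, pvUser);
        return VBOXSTRICTRC_TODO(rcStrict); /* int conversion kept for now, marked as a to-do */
    }
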
  • trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp

    r55937 r56013  
    7272 * @param   pvUser          User argument.
    7373 */
    74 PGM_ALL_CB2_DECL(int) csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    75                                                PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     74PGM_ALL_CB2_DECL(VBOXSTRICTRC)
     75csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     76                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    7677{
    7778    RTGCPTR const GCPtrMonitored = (uintptr_t)pvUser | (GCPtr & PAGE_OFFSET_MASK);
  • trunk/src/VBox/VMM/VMMAll/PATMAll.cpp

    r55937 r56013  
    5858 * @param   pvUser          The address of the guest page we're monitoring.
    5959 */
    60 PGM_ALL_CB2_DECL(int) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    61                                           PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     60PGM_ALL_CB2_DECL(VBOXSTRICTRC) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     61                                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    6262{
    6363    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r55988 r56013  
    719719                    &&  uErr & X86_TRAP_PF_RW)
    720720                {
     721                    VBOXSTRICTRC rcStrict;
    721722#   ifdef IN_RC
    722723                    STAM_PROFILE_START(&pCur->Stat, h);
     
    724725                    void *pvUser = pCur->CTX_SUFF(pvUser);
    725726                    pgmUnlock(pVM);
    726                     rc = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
    727                                                           pvFault - pCur->Core.Key, pvUser);
     727                    rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
     728                                                                pvFault - pCur->Core.Key, pvUser);
    728729                    pgmLock(pVM);
    729730                    STAM_PROFILE_STOP(&pCur->Stat, h);
    730731#   else
    731732                    AssertFailed();
    732                     rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
     733                    rcStrict = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
    733734#   endif
    734735                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eHandlersMapping);
    735736                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Mapping; });
    736                     return rc;
     737                    return VBOXSTRICTRC_TODO(rcStrict);
    737738                }
    738739
     
    816817                     ||  pCurType->enmKind != PGMVIRTHANDLERKIND_WRITE ) )
    817818            {
     819                VBOXSTRICTRC rcStrict;
    818820#   ifdef IN_RC
    819821                STAM_PROFILE_START(&pCur->Stat, h);
    820822                void *pvUser = pCur->CTX_SUFF(pvUser);
    821823                pgmUnlock(pVM);
    822                 rc = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
    823                                                       pvFault - pCur->Core.Key, pvUser);
     824                rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
     825                                                            pvFault - pCur->Core.Key, pvUser);
    824826                pgmLock(pVM);
    825827                STAM_PROFILE_STOP(&pCur->Stat, h);
    826828#   else
    827                 rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
     829                rcStrict = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
    828830#   endif
    829831                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndVirt; });
    830                 return rc;
     832                return VBOXSTRICTRC_TODO(rcStrict);
    831833            }
    832834        }
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    r55899 r56013  
    4747#endif
    4848
     49
     50#ifndef IN_RING0
     51
     52# ifdef SELM_TRACK_GUEST_GDT_CHANGES
     53/**
     54 * @callback_method_impl{FNPGMVIRTHANDLER}
     55 */
     56PGM_ALL_CB2_DECL(VBOXSTRICTRC)
     57selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     58                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     59{
     60    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
     61    Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
     62    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
     63
     64#  ifdef IN_RING3
     65    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     66    return VINF_PGM_HANDLER_DO_DEFAULT;
     67
     68#  else  /* IN_RC: */
     69    /*
     70     * Execute the write, doing necessary pre and post shadow GDT checks.
     71     */
     72    PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
     73    uint32_t offGuestGdt = pCtx->gdtr.pGdt - GCPtr;
     74    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
     75    memcpy(pvBuf, pvPtr, cbBuf);
     76    VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
     77    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
     78        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
     79    else
     80        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
     81    return rcStrict;
     82#  endif
     83}
     84# endif
     85
     86
     87# ifdef SELM_TRACK_GUEST_LDT_CHANGES
     88/**
     89 * @callback_method_impl{FNPGMVIRTHANDLER}
     90 */
     91PGM_ALL_CB2_DECL(VBOXSTRICTRC)
     92selmGuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     93                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     94{
     95    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
     96    Log(("selmGuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
     97    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
     98
     99    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     100#  ifdef IN_RING3
     101    return VINF_PGM_HANDLER_DO_DEFAULT;
     102#  else
     103    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
     104    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     105#  endif
     106}
     107# endif
     108
     109
     110# ifdef SELM_TRACK_GUEST_TSS_CHANGES
     111/**
     112 * @callback_method_impl{FNPGMVIRTHANDLER}
     113 */
     114PGM_ALL_CB2_DECL(VBOXSTRICTRC)
     115selmGuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     116                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     117{
     118    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
     119    Log(("selmGuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
     120    NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);
     121
     122#  ifdef IN_RING3
     123    /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
     124     *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
     125     *        should probably also deregister the virtual handler if TR.base/size
     126     *        changes while we're in REM.  May also share
     127     *        selmRCGuestTssPostWriteCheck code. */
     128    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     129    return VINF_PGM_HANDLER_DO_DEFAULT;
     130
     131#  else  /* IN_RC */
     132    /*
     133     * Do the write and check if anything relevant changed.
     134     */
     135    Assert(pVM->selm.s.GCPtrGuestTss != (uintptr_t)RTRCPTR_MAX);
     136    memcpy(pvPtr, pvBuf, cbBuf);
     137    return selmRCGuestTssPostWriteCheck(pVM, pVCpu, GCPtr - pVM->selm.s.GCPtrGuestTss, cbBuf);
     138#  endif
     139}
     140# endif
     141
     142#endif /* IN_RING0 */
    49143
    50144
  • trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp

    r55001 r56013  
    4141#include "internal/pgm.h"
    4242
     43
     44
     45#if defined(TRPM_TRACK_GUEST_IDT_CHANGES) && !defined(IN_RING0)
     46/**
     47 * \#PF Handler callback for virtual access handler ranges.
     48 *
     49 * Important to realize that a physical page in a range can have aliases, and
     50 * for ALL and WRITE handlers these will also trigger.
     51 *
     52 * @returns VINF_SUCCESS if the handler have carried out the operation.
     53 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
     54 * @param   pVM             Pointer to the VM.
     55 * @param   pVCpu           Pointer to the cross context CPU context for the
     56 *                          calling EMT.
     57 * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
     58 * @param   pvPtr           The HC mapping of that address.
     59 * @param   pvBuf           What the guest is reading/writing.
     60 * @param   cbBuf           How much it's reading/writing.
     61 * @param   enmAccessType   The access type.
     62 * @param   enmOrigin       The origin of this call.
     63 * @param   pvUser          User argument.
     64 */
     65PGM_ALL_CB2_DECL(VBOXSTRICTRC)
     66trpmGuestIDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
     67                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
     68{
     69    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
     70    Log(("trpmGuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
     71    NOREF(pvPtr); NOREF(pvUser); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
     72    Assert(!HMIsEnabled(pVM));
     73
     74    /** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
     75    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
     76# ifdef IN_RC
     77    STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
     78# endif
     79    return VINF_PGM_HANDLER_DO_DEFAULT;
     80}
     81#endif /* TRPM_TRACK_GUEST_IDT_CHANGES && !IN_RING0 */
    4382
    4483
  • trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp

    r55966 r56013  
    292292VMMR3_INT_DECL(int) PGMR3HandlerVirtualTypeRegisterEx(PVM pVM, PGMVIRTHANDLERKIND enmKind, bool fRelocUserRC,
    293293                                                      PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
    294                                                       PFNPGMR3VIRTHANDLER pfnHandlerR3,
     294                                                      PFNPGMVIRTHANDLER pfnHandlerR3,
    295295                                                      RCPTRTYPE(FNPGMRCVIRTPFHANDLER) pfnPfHandlerRC,
    296296                                                      const char *pszDesc, PPGMVIRTHANDLERTYPE phType)
     
    354354VMMR3_INT_DECL(int) PGMR3HandlerVirtualTypeRegister(PVM pVM, PGMVIRTHANDLERKIND enmKind, bool fRelocUserRC,
    355355                                                    PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
    356                                                     PFNPGMR3VIRTHANDLER pfnHandlerR3,
     356                                                    PFNPGMVIRTHANDLER pfnHandlerR3,
    357357                                                    const char *pszPfHandlerRC, const char *pszDesc,
    358358                                                    PPGMVIRTHANDLERTYPE phType)
  • trunk/src/VBox/VMM/VMMR3/SELM.cpp

    r55900 r56013  
    9595static DECLCALLBACK(int)  selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
    9696static DECLCALLBACK(int)  selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
    97 static FNPGMR3VIRTHANDLER selmR3GuestGDTWriteHandler;
    98 static FNPGMR3VIRTHANDLER selmR3GuestLDTWriteHandler;
    99 static FNPGMR3VIRTHANDLER selmR3GuestTSSWriteHandler;
    10097static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    10198static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     
    222219# endif
    223220        rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
    224                                              NULL /*pfnInvalidateR3*/, selmR3GuestGDTWriteHandler,
     221                                             NULL /*pfnInvalidateR3*/, selmGuestGDTWriteHandler,
    225222                                             "selmRCGuestGDTWritePfHandler",
    226223                                             "Guest GDT write access handler", &pVM->selm.s.hGuestGdtWriteHandlerType);
    227224        AssertRCReturn(rc, rc);
    228225        rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
    229                                              NULL /*pfnInvalidateR3*/, selmR3GuestLDTWriteHandler,
     226                                             NULL /*pfnInvalidateR3*/, selmGuestLDTWriteHandler,
    230227                                             "selmRCGuestLDTWritePfHandler",
    231228                                             "Guest LDT write access handler", &pVM->selm.s.hGuestLdtWriteHandlerType);
    232229        AssertRCReturn(rc, rc);
    233230        rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
    234                                              NULL /*pfnInvalidateR3*/, selmR3GuestTSSWriteHandler,
     231                                             NULL /*pfnInvalidateR3*/, selmGuestTSSWriteHandler,
    235232                                             "selmRCGuestTSSWritePfHandler",
    236233                                             "Guest TSS write access handler", &pVM->selm.s.hGuestTssWriteHandlerType);
     
    14791476}
    14801477
    1481 #endif /*VBOX_WITH_RAW_MODE*/
    1482 
    1483 #ifdef SELM_TRACK_GUEST_GDT_CHANGES
    1484 /**
    1485  * \#PF Handler callback for virtual access handler ranges.
    1486  *
    1487  * Important to realize that a physical page in a range can have aliases, and
    1488  * for ALL and WRITE handlers these will also trigger.
    1489  *
    1490  * @returns VINF_SUCCESS if the handler have carried out the operation.
    1491  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    1492  * @param   pVM             Pointer to the VM.
    1493  * @param   pVCpu           Pointer to the cross context CPU context for the
    1494  *                          calling EMT.
    1495  * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
    1496  * @param   pvPtr           The HC mapping of that address.
    1497  * @param   pvBuf           What the guest is reading/writing.
    1498  * @param   cbBuf           How much it's reading/writing.
    1499  * @param   enmAccessType   The access type.
    1500  * @param   enmOrigin       Who is making this write.
    1501  * @param   pvUser          Unused.
    1502  */
    1503 static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    1504                                                     PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    1505 {
    1506     Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    1507     Log(("selmR3GuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    1508     NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
    1509 
    1510     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    1511     return VINF_PGM_HANDLER_DO_DEFAULT;
    1512 }
    1513 #endif
    1514 
    1515 #ifdef SELM_TRACK_GUEST_LDT_CHANGES
    1516 /**
    1517  * \#PF Handler callback for virtual access handler ranges.
    1518  *
    1519  * Important to realize that a physical page in a range can have aliases, and
    1520  * for ALL and WRITE handlers these will also trigger.
    1521  *
    1522  * @returns VINF_SUCCESS if the handler have carried out the operation.
    1523  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    1524  * @param   pVM             Pointer to the VM.
    1525  * @param   pVCpu           Pointer to the cross context CPU context for the
    1526  *                          calling EMT.
    1527  * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
    1528  * @param   pvPtr           The HC mapping of that address.
    1529  * @param   pvBuf           What the guest is reading/writing.
    1530  * @param   cbBuf           How much it's reading/writing.
    1531  * @param   enmAccessType   The access type.
    1532  * @param   enmOrigin       Who is making this write.
    1533  * @param   pvUser          Unused.
    1534  */
    1535 static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    1536                                                     PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    1537 {
    1538     Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    1539     Log(("selmR3GuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    1540     NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
    1541 
    1542     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    1543     return VINF_PGM_HANDLER_DO_DEFAULT;
    1544 }
    1545 #endif
    1546 
    1547 
    1548 #ifdef SELM_TRACK_GUEST_TSS_CHANGES
    1549 /**
    1550  * \#PF Handler callback for virtual access handler ranges.
    1551  *
    1552  * Important to realize that a physical page in a range can have aliases, and
    1553  * for ALL and WRITE handlers these will also trigger.
    1554  *
    1555  * @returns VINF_SUCCESS if the handler have carried out the operation.
    1556  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    1557  * @param   pVM             Pointer to the VM.
    1558  * @param   pVCpu           Pointer to the cross context CPU context for the
    1559  *                          calling EMT.
    1560  * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
    1561  * @param   pvPtr           The HC mapping of that address.
    1562  * @param   pvBuf           What the guest is reading/writing.
    1563  * @param   cbBuf           How much it's reading/writing.
    1564  * @param   enmAccessType   The access type.
    1565  * @param   enmOrigin       Who is making this write.
    1566  * @param   pvUser          Unused.
    1567  */
    1568 static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    1569                                                     PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    1570 {
    1571     Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    1572     Log(("selmR3GuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
    1573     NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);
    1574 
    1575     /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
    1576      *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
    1577      *        should probably also deregister the virtual handler if TR.base/size
    1578      *        changes while we're in REM. */
    1579 
    1580     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    1581     return VINF_PGM_HANDLER_DO_DEFAULT;
    1582 }
    1583 #endif
    1584 
    1585 #ifdef VBOX_WITH_RAW_MODE
    15861478
    15871479/**
     
    18351727VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
    18361728{
    1837 #ifdef VBOX_STRICT
     1729# ifdef VBOX_STRICT
    18381730    PVMCPU pVCpu = VMMGetCpu(pVM);
    18391731    AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
     
    19561848    }
    19571849
    1958 #else  /* !VBOX_STRICT */
     1850# else  /* !VBOX_STRICT */
    19591851    NOREF(pVM);
    1960 #endif /* !VBOX_STRICT */
     1852# endif /* !VBOX_STRICT */
    19611853
    19621854    return VINF_SUCCESS;
     
    19731865VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
    19741866{
    1975 #if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
     1867# if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
    19761868    PVMCPU pVCpu = VMMGetCpu(pVM);
    19771869
     
    20991991    return true;
    21001992
    2101 #else  /* !VBOX_STRICT */
     1993# else  /* !VBOX_STRICT */
    21021994    NOREF(pVM);
    21031995    return true;
    2104 #endif /* !VBOX_STRICT */
     1996# endif /* !VBOX_STRICT */
    21051997}
    21061998
  • trunk/src/VBox/VMM/VMMR3/TRPM.cpp

    r55900 r56013  
    424424
    425425
    426 #ifdef VBOX_WITH_RAW_MODE
    427 /** Enable or disable tracking of Guest's IDT. */
    428 # define TRPM_TRACK_GUEST_IDT_CHANGES
    429 /** Enable or disable tracking of Shadow IDT. */
    430 # define TRPM_TRACK_SHADOW_IDT_CHANGES
    431 #endif
    432 
    433426/** TRPM saved state version. */
    434427#define TRPM_SAVED_STATE_VERSION        9
     
    441434static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM);
    442435static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
    443 #ifdef TRPM_TRACK_GUEST_IDT_CHANGES
    444 static FNPGMR3VIRTHANDLER trpmR3GuestIDTWriteHandler;
    445 #endif
    446436
    447437
     
    523513# endif
    524514        rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
    525                                              NULL /*pfnInvalidateR3*/, trpmR3GuestIDTWriteHandler,
     515                                             NULL /*pfnInvalidateR3*/, trpmGuestIDTWriteHandler,
    526516                                             "trpmRCGuestIDTWritePfHandler",
    527517                                             "Guest IDT write access handler", &pVM->trpm.s.hGuestIdtWriteHandlerType);
     
    11691159    return VINF_SUCCESS;
    11701160}
    1171 
    1172 
    1173 # ifdef TRPM_TRACK_GUEST_IDT_CHANGES
    1174 /**
    1175  * \#PF Handler callback for virtual access handler ranges.
    1176  *
    1177  * Important to realize that a physical page in a range can have aliases, and
    1178  * for ALL and WRITE handlers these will also trigger.
    1179  *
    1180  * @returns VINF_SUCCESS if the handler have carried out the operation.
    1181  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    1182  * @param   pVM             Pointer to the VM.
    1183  * @param   pVCpu           Pointer to the cross context CPU context for the
    1184  *                          calling EMT.
    1185  * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
    1186  * @param   pvPtr           The HC mapping of that address.
    1187  * @param   pvBuf           What the guest is reading/writing.
    1188  * @param   cbBuf           How much it's reading/writing.
    1189  * @param   enmAccessType   The access type.
    1190  * @param   enmOrigin       The origin of this call.
    1191  * @param   pvUser          User argument.
    1192  */
    1193 static DECLCALLBACK(int) trpmR3GuestIDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
    1194                                                     PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
    1195 {
    1196     Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    1197     Log(("trpmR3GuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    1198     NOREF(pvPtr); NOREF(pvUser); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
    1199     Assert(!HMIsEnabled(pVM));
    1200 
    1201     VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    1202     return VINF_PGM_HANDLER_DO_DEFAULT;
    1203 }
    1204 # endif /* TRPM_TRACK_GUEST_IDT_CHANGES */
    12051161
    12061162
  • trunk/src/VBox/VMM/VMMRC/CSAMRC.cpp

    r55966 r56013  
    6767 * @param   pvUser      Ignored.
    6868 */
    69 DECLEXPORT(int) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    70                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     69DECLEXPORT(VBOXSTRICTRC) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     70                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    7171{
    7272    PPATMGCSTATE pPATMGCState;
  • trunk/src/VBox/VMM/VMMRC/PATMRC.cpp

    r55966 r56013  
    6060 * @param   pvUser      The physical address of the guest page being monitored.
    6161 */
    62 DECLEXPORT(int) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    63                                         RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     62DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     63                                                 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    6464{
    6565    NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
  • trunk/src/VBox/VMM/VMMRC/SELMRC.cpp

    r55966 r56013  
    4848
    4949#ifdef SELM_TRACK_GUEST_GDT_CHANGES
     50
    5051/**
    5152 * Synchronizes one GDT entry (guest -> shadow).
     
    5354 * @returns VBox strict status code (appropriate for trap handling and GC
    5455 *          return).
     56 * @retval  VINF_SUCCESS
    5557 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
    5658 * @retval  VINF_SELM_SYNC_GDT
    57  * @retval  VINF_EM_RESCHEDULE_REM
    5859 *
    5960 * @param   pVM         Pointer to the VM.
    6061 * @param   pVCpu       The current virtual CPU.
    61  * @param   pRegFrame   Trap register frame.
     62 * @param   pCtx        CPU context for the current CPU.
    6263 * @param   iGDTEntry   The GDT entry to sync.
    6364 *
    6465 * @remarks Caller checks that this isn't the LDT entry!
    6566 */
    66 static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
     67static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
    6768{
    6869    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
     
    9091            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    9192            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
    92             return VINF_EM_RESCHEDULE_REM;
     93            /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
     94            return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
    9395        }
    9496    }
     
    136138     */
    137139    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    138     PCPUMCTX     pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    139140    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    140141    for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     
    180181 * @param   pVM         Pointer to the VM.
    181182 * @param   pVCpu       The current virtual CPU.
    182  * @param   pRegFrame   Trap register frame.
     183 * @param   pCtx        The CPU context.
    183184 * @param   iGDTEntry   The GDT entry to sync.
    184185 */
    185 static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
     186void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
    186187{
    187188    /*
     
    200201    PCX86DESC       pDesc    = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    201202    uint32_t        uCpl     = CPUMGetGuestCPL(pVCpu);
    202     PCPUMCTX        pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    203203    PCPUMSELREG     paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    204204    for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     
    219219        }
    220220    }
    221 
    222 }
    223 
    224 
    225 /**
    226  * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
    227  *
    228  * @returns VBox status code (appropriate for trap handling and GC return).
    229  * @param   pVM         Pointer to the VM.
    230  * @param   pVCpu       Pointer to the cross context CPU context for the
    231  *                      calling EMT.
    232  * @param   uErrorCode  CPU Error code.
    233  * @param   pRegFrame   Trap register frame.
    234  * @param   pvFault     The fault address (cr2).
    235  * @param   pvRange     The base address of the handled virtual range.
    236  * @param   offRange    The offset of the access into this range.
    237  *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
    238  */
    239 DECLEXPORT(int) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    240                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     221}
     222
     223
     224/**
     225 * Syncs hidden selector register parts before emulating a GDT change.
     226 *
     227 * This is shared between the selmRCGuestGDTWritePfHandler and
     228 * selmGuestGDTWriteHandler.
     229 *
     230 * @param   pVM             Pointer to the cross context VM structure.
     231 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
     232 * @param   offGuestTss     The offset into the TSS of the write that was made.
     233 * @param   cbWrite         The number of bytes written.
     234 * @param   pCtx            The current CPU context.
     235 */
     236void selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
     237{
     238    uint32_t       iGdt      = offGuestGdt >> X86_SEL_SHIFT;
     239    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
     240    do
     241    {
     242        selmRCSyncGdtSegRegs(pVM, pVCpu, pCtx, iGdt);
     243        iGdt++;
     244    } while (iGdt <= iGdtLast);
     245}
     246
     247
     248/**
     249 * Checks the guest GDT for changes after a write has been emulated.
     250 *
     251 *
     252 * This is shared between the selmRCGuestGDTWritePfHandler and
     253 * selmGuestGDTWriteHandler.
     254 *
     255 * @retval  VINF_SUCCESS
     256 * @retval  VINF_SELM_SYNC_GDT
     257 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
     258 *
     259 * @param   pVM             Pointer to the cross context VM structure.
     260 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
     261 * @param   offGuestTss     The offset into the TSS of the write that was made.
     262 * @param   cbWrite         The number of bytes written.
     263 * @param   pCtx            The current CPU context.
     264 */
     265VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
     266{
     267    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     268
     269    /* Check if the LDT was in any way affected.  Do not sync the
     270       shadow GDT if that's the case or we might have trouble in
     271       the world switcher (or so they say). */
     272    uint32_t const iGdtFirst = offGuestGdt >> X86_SEL_SHIFT;
     273    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
     274    uint32_t const iLdt      = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
     275    if (iGdtFirst <= iLdt && iGdtLast >= iLdt)
     276    {
     277        Log(("LDTR selector change -> fall back to HC!!\n"));
     278        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     279        rcStrict = VINF_SELM_SYNC_GDT;
     280        /** @todo Implement correct stale LDT handling.  */
     281    }
     282    else
     283    {
     284        /* Sync the shadow GDT and continue provided the update didn't
     285           cause any segment registers to go stale in any way. */
     286        uint32_t iGdt = iGdtFirst;
     287        do
     288        {
     289            VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pCtx, iGdt);
     290            Assert(rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT || rcStrict2 == VINF_SELM_SYNC_GDT);
     291            if (rcStrict == VINF_SUCCESS)
     292                rcStrict = rcStrict2;
     293            iGdt++;
     294        } while (   iGdt <= iGdtLast
     295                 && (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT));
     296        if (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT)
     297            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
     298    }
     299    return rcStrict;
     300}
     301
     302
     303/**
     304 * @callback_method_impl{FNPGMVIRTHANDLER, Guest GDT write access \#PF handler }
     305 */
     306DECLEXPORT(VBOXSTRICTRC) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     307                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    241308{
    242309    LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
     
    246313     * Check if any selectors might be affected.
    247314     */
    248     unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
    249     selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
    250     if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
    251         selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);
     315    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offRange, 8 /*cbWrite*/, CPUMCTX_FROM_CORE(pRegFrame));
    252316
    253317    /*
     
    257321    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    258322    if (RT_SUCCESS(rcStrict) && cb)
    259     {
    260         /* Check if the LDT was in any way affected.  Do not sync the
    261            shadow GDT if that's the case or we might have trouble in
    262            the world switcher (or so they say). */
    263         unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
    264         unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
    265         if (   iGDTE1 == iLdt
    266             || iGDTE2 == iLdt)
    267         {
    268             Log(("LDTR selector change -> fall back to HC!!\n"));
    269             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    270             rcStrict = VINF_SELM_SYNC_GDT;
    271             /** @todo Implement correct stale LDT handling.  */
    272         }
    273         else
    274         {
    275             /* Sync the shadow GDT and continue provided the update didn't
    276                cause any segment registers to go stale in any way. */
    277             VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
    278             if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
    279             {
    280                 if (rcStrict == VINF_SUCCESS)
    281                     rcStrict = rcStrict2;
    282 
    283                 if (iGDTE1 != iGDTE2)
    284                 {
    285                     rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
    286                     if (rcStrict == VINF_SUCCESS)
    287                         rcStrict = rcStrict2;
    288                 }
    289 
    290                 if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
    291                 {
    292                     /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */
    293                     if (rcStrict2 == VINF_EM_RESCHEDULE_REM)
    294                         rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
    295                     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    296                     return VBOXSTRICTRC_TODO(rcStrict);
    297                 }
    298             }
    299 
    300             /* sync failed, return to ring-3 and resync the GDT. */
    301             if (rcStrict == VINF_SUCCESS || RT_FAILURE(rcStrict2))
    302                 rcStrict = rcStrict2;
    303         }
    304     }
     323        rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offRange, cb, CPUMCTX_FROM_CORE(pRegFrame));
    305324    else
    306325    {
     
    311330    }
    312331
    313     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    314     return VBOXSTRICTRC_TODO(rcStrict);
    315 }
     332    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
     333        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
     334    else
     335        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
     336    return rcStrict;
     337}
     338
    316339#endif /* SELM_TRACK_GUEST_GDT_CHANGES */
    317340
    318 
    319341#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    320342/**
    321  * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
     343 * @callback_method_impl{FNPGMVIRTHANDLER, Guest LDT write access \#PF handler }
     344 */
     345DECLEXPORT(VBOXSTRICTRC) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     346                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     347{
     348    /** @todo To be implemented... or not. */
     349    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     350    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     351
     352    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     353    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
     354    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     355}
     356#endif
     357
     358
     359#ifdef SELM_TRACK_GUEST_TSS_CHANGES
     360
     361/**
     362 * Read wrapper used by selmRCGuestTSSWriteHandler.
     363 * @returns VBox status code (appropriate for trap handling and GC return).
     364 * @param   pVM         Pointer to the VM.
     365 * @param   pvDst       Where to put the bits we read.
     366 * @param   pvSrc       Guest address to read from.
     367 * @param   cb          The number of bytes to read.
     368 */
     369DECLINLINE(int) selmRCReadTssBits(PVM pVM, PVMCPU pVCpu, void *pvDst, void const *pvSrc, size_t cb)
     370{
     371    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
     372    if (RT_SUCCESS(rc))
     373        return VINF_SUCCESS;
     374
     375    /** @todo use different fallback?    */
     376    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
     377    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
     378    if (rc == VINF_SUCCESS)
     379    {
     380        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
     381        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
     382    }
     383    return rc;
     384}
     385
     386
     387/**
     388 * Checks the guest TSS for changes after a write has been emulated.
     389 *
     390 * This is shared between the
     391 *
     392 * @returns Strict VBox status code appropriate for raw-mode returns.
     393 * @param   pVM             Pointer to the cross context VM structure.
     394 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
     395 * @param   offGuestTss     The offset into the TSS of the write that was made.
     396 * @param   cbWrite         The number of bytes written.
     397 */
     398VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite)
     399{
     400    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     401
     402    /*
     403     * If it's on the same page as the esp0 and ss0 fields or actually one of them,
     404     * then check if any of these has changed.
     405     */
     406/** @todo just read the darn fields and put them on the stack. */
     407    PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
     408    if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
     409        && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
     410        && (   pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
     411            || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
     412       )
     413    {
     414        Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     415             (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
     416        pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
     417        pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
     418        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     419    }
     420# ifdef VBOX_WITH_RAW_RING1
     421    else if (   EMIsRawRing1Enabled(pVM)
     422             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
     423             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
     424             && (   pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
     425                 || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
     426            )
     427    {
     428        Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     429             (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
     430        pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
     431        pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
     432        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     433    }
     434# endif
     435    /* Handle misaligned TSS in a safe manner (just in case). */
     436    else if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, esp0)
     437             && offGuestTss < RT_UOFFSETOF(VBOXTSS, padding_ss0))
     438    {
     439        struct
     440        {
     441            uint32_t esp0;
     442            uint16_t ss0;
     443            uint16_t padding_ss0;
     444        } s;
     445        AssertCompileSize(s, 8);
     446        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
     447        if (   rcStrict == VINF_SUCCESS
     448            && (    s.esp0 !=  pVM->selm.s.Tss.esp1
     449                ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
     450           )
     451        {
     452            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
     453                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
     454            pVM->selm.s.Tss.esp1 = s.esp0;
     455            pVM->selm.s.Tss.ss1  = s.ss0 | 1;
     456            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     457        }
     458    }
     459
     460    /*
     461     * If VME is enabled we need to check if the interrupt redirection bitmap
     462     * needs updating.
     463     */
     464    if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
     465        && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
     466    {
     467        if (offGuestTss - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
     468        {
     469            uint16_t offIoBitmap = pGuestTss->offIoBitmap;
     470            if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
     471            {
     472                Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
     473                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     474                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     475            }
     476            else
     477                Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
     478        }
     479        else
     480        {
     481            /** @todo not sure how the partial case is handled; probably not allowed */
     482            uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
     483            if (   offIntRedirBitmap <= offGuestTss
     484                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offGuestTss + cbWrite
     485                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
     486            {
     487                Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offGuestTss=%x cbWrite=%x\n",
     488                     pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offGuestTss, cbWrite));
     489
     490                /** @todo only update the changed part. */
     491                for (uint32_t i = 0; rcStrict == VINF_SUCCESS && i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
     492                    rcStrict = selmRCReadTssBits(pVM, pVCpu, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
     493                                                 (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
     494                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
     495            }
     496        }
     497    }
     498
     499    /*
     500     * Return to ring-3 for a full resync if any of the above fails... (?)
     501     */
     502    if (rcStrict != VINF_SUCCESS)
     503    {
     504        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     505        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     506        if (RT_SUCCESS(rcStrict))
     507            rcStrict = VINF_SUCCESS;
     508    }
     509
     510    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
     511    return rcStrict;
     512}
     513
     514
     515/**
     516 * @callback_method_impl{FNPGMVIRTHANDLER, Guest TSS write access \#PF handler}
     517 */
     518DECLEXPORT(VBOXSTRICTRC) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     519                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     520{
     521    LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
     522    NOREF(pvRange); NOREF(pvUser);
     523
     524    /*
     525     * Try emulate the access.
     526     */
     527    uint32_t cb;
     528    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
     529    if (   RT_SUCCESS(rcStrict)
     530        && cb)
     531        rcStrict = selmRCGuestTssPostWriteCheck(pVM, pVCpu, offRange, cb);
     532    else
     533    {
     534        AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
     535        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     536        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
     537        if (rcStrict == VERR_EM_INTERPRETER)
     538            rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
     539    }
     540    return rcStrict;
     541}
     542
     543#endif /* SELM_TRACK_GUEST_TSS_CHANGES */
     544
     545#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
     546/**
     547 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
    322548 *
    323549 * @returns VBox status code (appropriate for trap handling and GC return).
     
    333559 * @param   pvUser      Unused.
    334560 */
    335 DECLEXPORT(int) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    336                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    337 {
    338     /** @todo To be implemented. */
    339     ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    340     NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    341 
    342     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    343     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    344     return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     561DECLEXPORT(VBOXSTRICTRC) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     562                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     563{
     564    LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     565    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     566    return VERR_SELM_SHADOW_GDT_WRITE;
    345567}
    346568#endif
    347569
    348570
    349 #ifdef SELM_TRACK_GUEST_TSS_CHANGES
    350 /**
    351  * Read wrapper used by selmRCGuestTSSWriteHandler.
    352  * @returns VBox status code (appropriate for trap handling and GC return).
    353  * @param   pVM         Pointer to the VM.
    354  * @param   pvDst       Where to put the bits we read.
    355  * @param   pvSrc       Guest address to read from.
    356  * @param   cb          The number of bytes to read.
    357  */
    358 DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
    359 {
    360     PVMCPU pVCpu = VMMGetCpu0(pVM);
    361 
    362     int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    363     if (RT_SUCCESS(rc))
    364         return VINF_SUCCESS;
    365 
    366     /** @todo use different fallback?    */
    367     rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    368     AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    369     if (rc == VINF_SUCCESS)
    370     {
    371         rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    372         AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
    373     }
    374     return rc;
    375 }
    376 
    377 /**
    378  * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
    379  *
    380  * @returns VBox status code (appropriate for trap handling and GC return).
    381  * @param   pVM         Pointer to the VM.
    382  * @param   pVCpu       Pointer to the cross context CPU context for the
    383  *                      calling EMT.
    384  * @param   uErrorCode  CPU Error code.
    385  * @param   pRegFrame   Trap register frame.
    386  * @param   pvFault     The fault address (cr2).
    387  * @param   pvRange     The base address of the handled virtual range.
    388  * @param   offRange    The offset of the access into this range.
    389  *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
    390  * @param   pvUser      Unused.
    391  */
    392 DECLEXPORT(int) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    393                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    394 {
    395     LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    396     NOREF(pvRange); NOREF(pvUser);
    397 
    398     /*
    399      * Try emulate the access.
    400      */
    401     uint32_t cb;
    402     VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    403     if (   RT_SUCCESS(rcStrict)
    404         && cb)
    405     {
    406         rcStrict = VINF_SUCCESS;
    407 
    408         /*
    409          * If it's on the same page as the esp0 and ss0 fields or actually one of them,
    410          * then check if any of these has changed.
    411          */
    412         PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
    413         if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
    414             &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
    415             &&  (    pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
    416                  ||  pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    417            )
    418         {
    419             Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    420                  (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
    421             pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
    422             pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
    423             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    424         }
    425 #ifdef VBOX_WITH_RAW_RING1
    426         else if (    EMIsRawRing1Enabled(pVM)
    427                  &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
    428                  &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
    429                  &&  (    pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
    430                       ||  pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
    431                 )
    432         {
    433             Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    434                  (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
    435             pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
    436             pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
    437             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    438         }
    439 #endif
    440         /* Handle misaligned TSS in a safe manner (just in case). */
    441         else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
    442                  && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
    443         {
    444             struct
    445             {
    446                 uint32_t esp0;
    447                 uint16_t ss0;
    448                 uint16_t padding_ss0;
    449             } s;
    450             AssertCompileSize(s, 8);
    451             rcStrict = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
    452             if (    rcStrict == VINF_SUCCESS
    453                 &&  (    s.esp0 !=  pVM->selm.s.Tss.esp1
    454                      ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    455                )
    456             {
    457                 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
    458                      (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
    459                 pVM->selm.s.Tss.esp1 = s.esp0;
    460                 pVM->selm.s.Tss.ss1  = s.ss0 | 1;
    461                 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    462             }
    463         }
    464 
    465         /*
    466          * If VME is enabled we need to check if the interrupt redirection bitmap
    467          * needs updating.
    468          */
    469         if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
    470             &&  (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
    471         {
    472             if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
    473             {
    474                 uint16_t offIoBitmap = pGuestTss->offIoBitmap;
    475                 if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
    476                 {
    477                     Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    478                     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    479                     VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    480                 }
    481                 else
    482                     Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    483             }
    484             else
    485             {
    486                 /** @todo not sure how the partial case is handled; probably not allowed */
    487                 uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
    488                 if (   offIntRedirBitmap <= offRange
    489                     && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
    490                     && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
    491                 {
    492                     Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
    493                          pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));
    494 
    495                     /** @todo only update the changed part. */
    496                     for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
    497                     {
    498                         rcStrict = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
    499                                                      (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
    500                         if (rcStrict != VINF_SUCCESS)
    501                             break;
    502                     }
    503                     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
    504                 }
    505             }
    506         }
    507 
    508         /* Return to ring-3 for a full resync if any of the above fails... (?) */
    509         if (rcStrict != VINF_SUCCESS)
    510         {
    511             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    512             VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    513             if (RT_SUCCESS(rcStrict))
    514                 rcStrict = VINF_SUCCESS;
    515         }
    516 
    517         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    518     }
    519     else
    520     {
    521         AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
    522         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    523         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
    524         if (rcStrict == VERR_EM_INTERPRETER)
    525             rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    526     }
    527     return VBOXSTRICTRC_TODO(rcStrict);
    528 }
    529 #endif /* SELM_TRACK_GUEST_TSS_CHANGES */
    530 
    531 
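
The esp0/ss0 bookkeeping in the handler above is the raw-mode ring compression at work: because guest ring-0 code actually executes in ring 1, the guest's ring-0 stack is mirrored into the shadow TSS ring-1 fields with the selector RPL forced to 1, and the change check masks that bit off again ("undo raw-r0") before comparing. A minimal self-contained sketch of that rule, using simplified stand-in structures rather than VBOXTSS and pVM->selm.s.Tss:

    #include <stdint.h>

    /* Sketch only: simplified stand-ins for the guest TSS ss0:esp0 pair and the
       shadow TSS ss1:esp1 pair handled above; not the real VBox structures. */
    typedef struct { uint32_t esp0; uint16_t ss0; } GUESTRING0STACK;
    typedef struct { uint32_t esp1; uint16_t ss1; } SHADOWRING1STACK;

    /* Mirror the guest ring-0 stack into the shadow ring-1 slot; returns non-zero
       when something actually changed.  The selector RPL is forced to 1 going in
       and masked off again when comparing, matching the handler above. */
    static int syncRing1Stack(SHADOWRING1STACK *pShadow, const GUESTRING0STACK *pGuest)
    {
        if (   pGuest->esp0 !=  pShadow->esp1
            || pGuest->ss0  != (pShadow->ss1 & ~1))
        {
            pShadow->esp1 = pGuest->esp0;
            pShadow->ss1  = (uint16_t)(pGuest->ss0 | 1);
            return 1;
        }
        return 0;
    }

The VBOX_WITH_RAW_RING1 branch above applies the same idea one ring further down, forcing the RPL to 2 instead of 1.
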
    532 #ifdef SELM_TRACK_SHADOW_GDT_CHANGES
    533 /**
    534  * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
     571#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
     572/**
     573 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
    535574 *
    536575 * @returns VBox status code (appropriate for trap handling and GC return).
     
    546585 * @param   pvUser      Unused.
    547586 */
    548 DECLEXPORT(int) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    549                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    550 {
    551     LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     587DECLEXPORT(VBOXSTRICTRC) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     588                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     589{
     590    LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     591    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    552592    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    553     return VERR_SELM_SHADOW_GDT_WRITE;
     593    return VERR_SELM_SHADOW_LDT_WRITE;
    554594}
    555595#endif
    556596
    557597
    558 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES
    559 /**
    560  * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
     598#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
     599/**
     600 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
    561601 *
    562602 * @returns VBox status code (appropriate for trap handling and GC return).
     
    572612 * @param   pvUser      Unused.
    573613 */
    574 DECLEXPORT(int) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    575                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    576 {
    577     LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    578     Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    579     NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    580     return VERR_SELM_SHADOW_LDT_WRITE;
    581 }
    582 #endif
    583 
    584 
    585 #ifdef SELM_TRACK_SHADOW_TSS_CHANGES
    586 /**
    587  * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
    588  *
    589  * @returns VBox status code (appropriate for trap handling and GC return).
    590  * @param   pVM         Pointer to the VM.
    591  * @param   pVCpu       Pointer to the cross context CPU context for the
    592  *                      calling EMT.
    593  * @param   uErrorCode   CPU Error code.
    594  * @param   pRegFrame   Trap register frame.
    595  * @param   pvFault     The fault address (cr2).
    596  * @param   pvRange     The base address of the handled virtual range.
    597  * @param   offRange    The offset of the access into this range.
    598  *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
    599  * @param   pvUser      Unused.
    600  */
    601 DECLEXPORT(int) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    602                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     614DECLEXPORT(VBOXSTRICTRC) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     615                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    603616{
    604617    LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
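
The old guest TSS handler above accumulates a VBOXSTRICTRC internally but is still declared to return int, so it flattens the status with VBOXSTRICTRC_TODO() at its return. A minimal sketch of that boundary conversion (dispatchWriteHandlerStatus() is a made-up name for illustration, and the usual VBox/VMM headers are assumed):

    /* Sketch only: flattening a VBOXSTRICTRC to an int at the raw-mode boundary,
       mirroring the VBOXSTRICTRC_TODO() use in the guest TSS handler above. */
    static int dispatchWriteHandlerStatus(VBOXSTRICTRC rcStrict)
    {
        /* RT_SUCCESS() and VBOXSTRICTRC_VAL() operate on the strict type directly... */
        if (!RT_SUCCESS(rcStrict))
            Log(("write handler failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        /* ...and VBOXSTRICTRC_TODO() converts it where a plain int is still required. */
        return VBOXSTRICTRC_TODO(rcStrict);
    }
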
  • trunk/src/VBox/VMM/VMMRC/TRPMRC.cpp

    r55900 r56013  
    105105 * @param   pvUser      Unused.
    106106 */
    107 DECLEXPORT(int) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    108                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     107DECLEXPORT(VBOXSTRICTRC) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     108                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    109109{
    110110    uint16_t    cbIDT;
     
    171171 * @param   pvUser      Unused.
    172172 */
    173 DECLEXPORT(int) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    174                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     173DECLEXPORT(VBOXSTRICTRC) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     174                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    175175{
    176176    LogRel(("FATAL ERROR: trpmRCShadowIDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%08RGv\r\n", pRegFrame->eip, pvFault, pvRange));
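
Both TRPM page-fault handlers change shape the same way as the SELM ones above: the parameter list stays put and only the return type becomes VBOXSTRICTRC. A hypothetical minimal handler in that shape (the name, log text and returned status are illustrative only, assuming the usual VMMRC includes):

    /* Sketch only: a hypothetical write #PF handler using the VBOXSTRICTRC-returning
       signature shown above; not part of this changeset. */
    DECLEXPORT(VBOXSTRICTRC) exampleRCWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                                     RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    {
        Log(("exampleRCWritePfHandler: pvFault=%RGv offRange=%08x\n", pvFault, offRange));
        NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvRange); NOREF(pvUser);
        /* Illustrative policy: have the caller interpret the faulting instruction. */
        return VINF_EM_RAW_EMULATE_INSTR;
    }
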
  • trunk/src/VBox/VMM/include/CSAMInternal.h

    r55937 r56013  
    282282}
    283283
     284PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  csamCodePageWriteHandler;
    284285RT_C_DECLS_BEGIN
    285286DECLEXPORT(FNPGMRCVIRTPFHANDLER)    csamRCCodePageWritePfHandler;
    286287RT_C_DECLS_END
    287 PGM_ALL_CB2_DECL(FNPGMR3VIRTHANDLER)  csamCodePageWriteHandler;
    288 
    289 #endif
     288
     289#endif
  • trunk/src/VBox/VMM/include/PATMInternal.h

    r55966 r56013  
    687687void patmR3DbgAddPatch(PVM pVM, PPATMPATCHREC pPatchRec);
    688688
    689 PGM_ALL_CB2_DECL(FNPGMR3VIRTHANDLER) patmVirtPageHandler;
    690 
    691 #endif
     689PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER) patmVirtPageHandler;
     690
     691#endif
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r55966 r56013  
    715715    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    716716    /** Pointer to R3 callback function. */
    717     R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
     717    R3PTRTYPE(PFNPGMVIRTHANDLER)        pfnHandlerR3;
    718718    /** Description / Name. For easing debugging. */
    719719    R3PTRTYPE(const char *)             pszDesc;
  • trunk/src/VBox/VMM/include/SELMInternal.h

    r55895 r56013  
    230230RT_C_DECLS_BEGIN
    231231
    232 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCGuestGDTWritePfHandler;
    233 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCGuestLDTWritePfHandler;
    234 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCGuestTSSWritePfHandler;
    235 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCShadowGDTWritePfHandler;
    236 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCShadowLDTWritePfHandler;
    237 DECLEXPORT(FNPGMRCVIRTPFHANDLER) selmRCShadowTSSWritePfHandler;
    238 
    239 void           selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
     232PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestGDTWriteHandler;
     233DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestGDTWritePfHandler;
     234PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestLDTWriteHandler;
     235DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestLDTWritePfHandler;
     236PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestTSSWriteHandler;
     237DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestTSSWritePfHandler;
     238DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowGDTWritePfHandler;
     239DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowLDTWritePfHandler;
     240DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowTSSWritePfHandler;
     241
     242void            selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry);
     243void            selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx);
     244VBOXSTRICTRC    selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite, PCPUMCTX pCtx);
     245VBOXSTRICTRC    selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite);
     246void            selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
    240247#ifdef VBOX_WITH_RAW_RING1
    241 void           selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
     248void            selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
    242249#endif
    243250
  • trunk/src/VBox/VMM/include/TRPMInternal.h

    r55895 r56013  
    2525#include <VBox/vmm/pgm.h>
    2626
    27 
    28 
    29 /** Enable to allow trap forwarding in GC. */
    30 #ifdef VBOX_WITH_RAW_MODE
    31 # define TRPM_FORWARD_TRAPS_IN_GC
    32 #endif
    33 
    34 /** First interrupt handler. Used for validating input. */
    35 #define TRPM_HANDLER_INT_BASE  0x20
    36 
    3727RT_C_DECLS_BEGIN
    3828
     
    4333 * @{
    4434 */
     35
     36
     37#ifdef VBOX_WITH_RAW_MODE
     38/** Enable or disable tracking of Guest's IDT. */
     39# define TRPM_TRACK_GUEST_IDT_CHANGES
     40/** Enable or disable tracking of Shadow IDT. */
     41# define TRPM_TRACK_SHADOW_IDT_CHANGES
     42#endif
     43
     44
     45/** Enable to allow trap forwarding in GC. */
     46#ifdef VBOX_WITH_RAW_MODE
     47# define TRPM_FORWARD_TRAPS_IN_GC
     48#endif
     49
     50/** First interrupt handler. Used for validating input. */
     51#define TRPM_HANDLER_INT_BASE  0x20
     52
    4553
    4654/** @name   TRPMGCTrapIn* flags.
     
    249257
    250258
    251 DECLEXPORT(FNPGMRCVIRTPFHANDLER) trpmRCGuestIDTWritePfHandler;
    252 DECLEXPORT(FNPGMRCVIRTPFHANDLER) trpmRCShadowIDTWritePfHandler;
     259PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  trpmGuestIDTWriteHandler;
     260DECLEXPORT(FNPGMRCVIRTPFHANDLER)    trpmRCGuestIDTWritePfHandler;
     261DECLEXPORT(FNPGMRCVIRTPFHANDLER)    trpmRCShadowIDTWritePfHandler;
    253262
    254263/**