VirtualBox

Changeset 56013 in vbox for trunk/src/VBox/VMM/VMMRC


Ignore:
Timestamp:
May 21, 2015 5:04:14 PM (10 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
100526
Message:

PGM: Made the virtual handler callbacks return VBOXSTRICTRC and prepared for RC execution.

Location:
trunk/src/VBox/VMM/VMMRC
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMRC/CSAMRC.cpp

    r55966 r56013  
    6767 * @param   pvUser      Ignored.
    6868 */
    69 DECLEXPORT(int) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    70                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     69DECLEXPORT(VBOXSTRICTRC) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     70                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    7171{
    7272    PPATMGCSTATE pPATMGCState;
  • trunk/src/VBox/VMM/VMMRC/PATMRC.cpp

    r55966 r56013  
    6060 * @param   pvUser      The physical address of the guest page being monitored.
    6161 */
    62 DECLEXPORT(int) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    63                                         RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     62DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     63                                                 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    6464{
    6565    NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
  • trunk/src/VBox/VMM/VMMRC/SELMRC.cpp

    r55966 r56013  
    4848
    4949#ifdef SELM_TRACK_GUEST_GDT_CHANGES
     50
    5051/**
    5152 * Synchronizes one GDT entry (guest -> shadow).
     
    5354 * @returns VBox strict status code (appropriate for trap handling and GC
    5455 *          return).
     56 * @retval  VINF_SUCCESS
    5557 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
    5658 * @retval  VINF_SELM_SYNC_GDT
    57  * @retval  VINF_EM_RESCHEDULE_REM
    5859 *
    5960 * @param   pVM         Pointer to the VM.
    6061 * @param   pVCpu       The current virtual CPU.
    61  * @param   pRegFrame   Trap register frame.
     62 * @param   pCtx        CPU context for the current CPU.
    6263 * @param   iGDTEntry   The GDT entry to sync.
    6364 *
    6465 * @remarks Caller checks that this isn't the LDT entry!
    6566 */
    66 static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
     67static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
    6768{
    6869    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
     
    9091            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    9192            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
    92             return VINF_EM_RESCHEDULE_REM;
     93            /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
     94            return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
    9395        }
    9496    }
     
    136138     */
    137139    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    138     PCPUMCTX     pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    139140    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    140141    for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     
    180181 * @param   pVM         Pointer to the VM.
    181182 * @param   pVCpu       The current virtual CPU.
    182  * @param   pRegFrame   Trap register frame.
     183 * @param   pCtx        The CPU context.
    183184 * @param   iGDTEntry   The GDT entry to sync.
    184185 */
    185 static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
     186void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
    186187{
    187188    /*
     
    200201    PCX86DESC       pDesc    = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    201202    uint32_t        uCpl     = CPUMGetGuestCPL(pVCpu);
    202     PCPUMCTX        pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    203203    PCPUMSELREG     paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    204204    for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
     
    219219        }
    220220    }
    221 
    222 }
    223 
    224 
    225 /**
    226  * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
    227  *
    228  * @returns VBox status code (appropriate for trap handling and GC return).
    229  * @param   pVM         Pointer to the VM.
    230  * @param   pVCpu       Pointer to the cross context CPU context for the
    231  *                      calling EMT.
    232  * @param   uErrorCode  CPU Error code.
    233  * @param   pRegFrame   Trap register frame.
    234  * @param   pvFault     The fault address (cr2).
    235  * @param   pvRange     The base address of the handled virtual range.
    236  * @param   offRange    The offset of the access into this range.
    237  *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
    238  */
    239 DECLEXPORT(int) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    240                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     221}
     222
     223
     224/**
     225 * Syncs hidden selector register parts before emulating a GDT change.
     226 *
     227 * This is shared between the selmRCGuestGDTWritePfHandler and
     228 * selmGuestGDTWriteHandler.
     229 *
     230 * @param   pVM             Pointer to the cross context VM structure.
     231 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
      232 * @param   offGuestGdt     The offset into the GDT of the write that was made.
     233 * @param   cbWrite         The number of bytes written.
     234 * @param   pCtx            The current CPU context.
     235 */
     236void selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
     237{
     238    uint32_t       iGdt      = offGuestGdt >> X86_SEL_SHIFT;
     239    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
     240    do
     241    {
     242        selmRCSyncGdtSegRegs(pVM, pVCpu, pCtx, iGdt);
     243        iGdt++;
     244    } while (iGdt <= iGdtLast);
     245}
     246
     247
     248/**
     249 * Checks the guest GDT for changes after a write has been emulated.
     250 *
     251 *
     252 * This is shared between the selmRCGuestGDTWritePfHandler and
     253 * selmGuestGDTWriteHandler.
     254 *
     255 * @retval  VINF_SUCCESS
     256 * @retval  VINF_SELM_SYNC_GDT
     257 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
     258 *
     259 * @param   pVM             Pointer to the cross context VM structure.
     260 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
      261 * @param   offGuestGdt     The offset into the GDT of the write that was made.
     262 * @param   cbWrite         The number of bytes written.
     263 * @param   pCtx            The current CPU context.
     264 */
     265VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
     266{
     267    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     268
     269    /* Check if the LDT was in any way affected.  Do not sync the
     270       shadow GDT if that's the case or we might have trouble in
     271       the world switcher (or so they say). */
     272    uint32_t const iGdtFirst = offGuestGdt >> X86_SEL_SHIFT;
     273    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
     274    uint32_t const iLdt      = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
     275    if (iGdtFirst <= iLdt && iGdtLast >= iLdt)
     276    {
     277        Log(("LDTR selector change -> fall back to HC!!\n"));
     278        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     279        rcStrict = VINF_SELM_SYNC_GDT;
     280        /** @todo Implement correct stale LDT handling.  */
     281    }
     282    else
     283    {
     284        /* Sync the shadow GDT and continue provided the update didn't
     285           cause any segment registers to go stale in any way. */
     286        uint32_t iGdt = iGdtFirst;
     287        do
     288        {
     289            VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pCtx, iGdt);
     290            Assert(rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT || rcStrict2 == VINF_SELM_SYNC_GDT);
     291            if (rcStrict == VINF_SUCCESS)
     292                rcStrict = rcStrict2;
     293            iGdt++;
     294        } while (   iGdt <= iGdtLast
     295                 && (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT));
     296        if (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT)
     297            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
     298    }
     299    return rcStrict;
     300}
     301
     302
     303/**
     304 * @callback_method_impl{FNPGMVIRTHANDLER, Guest GDT write access \#PF handler }
     305 */
     306DECLEXPORT(VBOXSTRICTRC) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     307                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    241308{
    242309    LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
     
    246313     * Check if any selectors might be affected.
    247314     */
    248     unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
    249     selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
    250     if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
    251         selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);
     315    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offRange, 8 /*cbWrite*/, CPUMCTX_FROM_CORE(pRegFrame));
    252316
    253317    /*
     
    257321    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    258322    if (RT_SUCCESS(rcStrict) && cb)
    259     {
    260         /* Check if the LDT was in any way affected.  Do not sync the
    261            shadow GDT if that's the case or we might have trouble in
    262            the world switcher (or so they say). */
    263         unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
    264         unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
    265         if (   iGDTE1 == iLdt
    266             || iGDTE2 == iLdt)
    267         {
    268             Log(("LDTR selector change -> fall back to HC!!\n"));
    269             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    270             rcStrict = VINF_SELM_SYNC_GDT;
    271             /** @todo Implement correct stale LDT handling.  */
    272         }
    273         else
    274         {
    275             /* Sync the shadow GDT and continue provided the update didn't
    276                cause any segment registers to go stale in any way. */
    277             VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
    278             if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
    279             {
    280                 if (rcStrict == VINF_SUCCESS)
    281                     rcStrict = rcStrict2;
    282 
    283                 if (iGDTE1 != iGDTE2)
    284                 {
    285                     rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
    286                     if (rcStrict == VINF_SUCCESS)
    287                         rcStrict = rcStrict2;
    288                 }
    289 
    290                 if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
    291                 {
    292                     /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */
    293                     if (rcStrict2 == VINF_EM_RESCHEDULE_REM)
    294                         rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
    295                     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    296                     return VBOXSTRICTRC_TODO(rcStrict);
    297                 }
    298             }
    299 
    300             /* sync failed, return to ring-3 and resync the GDT. */
    301             if (rcStrict == VINF_SUCCESS || RT_FAILURE(rcStrict2))
    302                 rcStrict = rcStrict2;
    303         }
    304     }
     323        rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offRange, cb, CPUMCTX_FROM_CORE(pRegFrame));
    305324    else
    306325    {
     
    311330    }
    312331
    313     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    314     return VBOXSTRICTRC_TODO(rcStrict);
    315 }
     332    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
     333        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
     334    else
     335        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
     336    return rcStrict;
     337}
     338
    316339#endif /* SELM_TRACK_GUEST_GDT_CHANGES */
    317340
    318 
    319341#ifdef SELM_TRACK_GUEST_LDT_CHANGES
    320342/**
    321  * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
     343 * @callback_method_impl{FNPGMVIRTHANDLER, Guest LDT write access \#PF handler }
     344 */
     345DECLEXPORT(VBOXSTRICTRC) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     346                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     347{
     348    /** @todo To be implemented... or not. */
     349    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     350    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     351
     352    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     353    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
     354    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     355}
     356#endif
     357
     358
     359#ifdef SELM_TRACK_GUEST_TSS_CHANGES
     360
     361/**
      362 * Read wrapper used by selmRCGuestTssPostWriteCheck.
     363 * @returns VBox status code (appropriate for trap handling and GC return).
     364 * @param   pVM         Pointer to the VM.
     365 * @param   pvDst       Where to put the bits we read.
     366 * @param   pvSrc       Guest address to read from.
     367 * @param   cb          The number of bytes to read.
     368 */
     369DECLINLINE(int) selmRCReadTssBits(PVM pVM, PVMCPU pVCpu, void *pvDst, void const *pvSrc, size_t cb)
     370{
     371    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
     372    if (RT_SUCCESS(rc))
     373        return VINF_SUCCESS;
     374
     375    /** @todo use different fallback?    */
     376    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
     377    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
     378    if (rc == VINF_SUCCESS)
     379    {
     380        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
     381        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
     382    }
     383    return rc;
     384}
     385
     386
     387/**
     388 * Checks the guest TSS for changes after a write has been emulated.
     389 *
      390 * This is shared between the selmRCGuestTSSWritePfHandler and the ring-3
      391 * guest TSS write handler.
     392 * @returns Strict VBox status code appropriate for raw-mode returns.
     393 * @param   pVM             Pointer to the cross context VM structure.
     394 * @param   pVCpu           Pointer to the cross context virtual CPU structure.
     395 * @param   offGuestTss     The offset into the TSS of the write that was made.
     396 * @param   cbWrite         The number of bytes written.
     397 */
     398VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite)
     399{
     400    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     401
     402    /*
     403     * If it's on the same page as the esp0 and ss0 fields or actually one of them,
     404     * then check if any of these has changed.
     405     */
     406/** @todo just read the darn fields and put them on the stack. */
     407    PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
     408    if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
     409        && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
     410        && (   pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
     411            || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
     412       )
     413    {
     414        Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     415             (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
     416        pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
     417        pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
     418        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     419    }
     420# ifdef VBOX_WITH_RAW_RING1
     421    else if (   EMIsRawRing1Enabled(pVM)
     422             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
     423             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
     424             && (   pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
     425                 || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
     426            )
     427    {
     428        Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     429             (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
     430        pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
     431        pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
     432        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     433    }
     434# endif
     435    /* Handle misaligned TSS in a safe manner (just in case). */
     436    else if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, esp0)
     437             && offGuestTss < RT_UOFFSETOF(VBOXTSS, padding_ss0))
     438    {
     439        struct
     440        {
     441            uint32_t esp0;
     442            uint16_t ss0;
     443            uint16_t padding_ss0;
     444        } s;
     445        AssertCompileSize(s, 8);
     446        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
     447        if (   rcStrict == VINF_SUCCESS
     448            && (    s.esp0 !=  pVM->selm.s.Tss.esp1
     449                ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
     450           )
     451        {
     452            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
     453                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
     454            pVM->selm.s.Tss.esp1 = s.esp0;
     455            pVM->selm.s.Tss.ss1  = s.ss0 | 1;
     456            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     457        }
     458    }
     459
     460    /*
     461     * If VME is enabled we need to check if the interrupt redirection bitmap
     462     * needs updating.
     463     */
     464    if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
     465        && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
     466    {
     467        if (offGuestTss - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
     468        {
     469            uint16_t offIoBitmap = pGuestTss->offIoBitmap;
     470            if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
     471            {
     472                Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
     473                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     474                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     475            }
     476            else
     477                Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
     478        }
     479        else
     480        {
     481            /** @todo not sure how the partial case is handled; probably not allowed */
     482            uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
     483            if (   offIntRedirBitmap <= offGuestTss
     484                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offGuestTss + cbWrite
     485                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
     486            {
     487                Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offGuestTss=%x cbWrite=%x\n",
     488                     pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offGuestTss, cbWrite));
     489
     490                /** @todo only update the changed part. */
     491                for (uint32_t i = 0; rcStrict == VINF_SUCCESS && i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
     492                    rcStrict = selmRCReadTssBits(pVM, pVCpu, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
     493                                                 (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
     494                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
     495            }
     496        }
     497    }
     498
     499    /*
     500     * Return to ring-3 for a full resync if any of the above fails... (?)
     501     */
     502    if (rcStrict != VINF_SUCCESS)
     503    {
     504        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     505        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     506        if (RT_SUCCESS(rcStrict))
     507            rcStrict = VINF_SUCCESS;
     508    }
     509
     510    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
     511    return rcStrict;
     512}
     513
     514
     515/**
     516 * @callback_method_impl{FNPGMVIRTHANDLER, Guest TSS write access \#PF handler}
     517 */
     518DECLEXPORT(VBOXSTRICTRC) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     519                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     520{
     521    LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
     522    NOREF(pvRange); NOREF(pvUser);
     523
     524    /*
     525     * Try emulate the access.
     526     */
     527    uint32_t cb;
     528    VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
     529    if (   RT_SUCCESS(rcStrict)
     530        && cb)
     531        rcStrict = selmRCGuestTssPostWriteCheck(pVM, pVCpu, offRange, cb);
     532    else
     533    {
     534        AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
     535        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     536        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
     537        if (rcStrict == VERR_EM_INTERPRETER)
     538            rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
     539    }
     540    return rcStrict;
     541}
     542
     543#endif /* SELM_TRACK_GUEST_TSS_CHANGES */
     544
     545#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
     546/**
     547 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
    322548 *
    323549 * @returns VBox status code (appropriate for trap handling and GC return).
     
    333559 * @param   pvUser      Unused.
    334560 */
    335 DECLEXPORT(int) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    336                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    337 {
    338     /** @todo To be implemented. */
    339     ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    340     NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    341 
    342     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    343     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    344     return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
     561DECLEXPORT(VBOXSTRICTRC) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     562                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     563{
     564    LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     565    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     566    return VERR_SELM_SHADOW_GDT_WRITE;
    345567}
    346568#endif
    347569
    348570
    349 #ifdef SELM_TRACK_GUEST_TSS_CHANGES
    350 /**
    351  * Read wrapper used by selmRCGuestTSSWriteHandler.
    352  * @returns VBox status code (appropriate for trap handling and GC return).
    353  * @param   pVM         Pointer to the VM.
    354  * @param   pvDst       Where to put the bits we read.
    355  * @param   pvSrc       Guest address to read from.
    356  * @param   cb          The number of bytes to read.
    357  */
    358 DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
    359 {
    360     PVMCPU pVCpu = VMMGetCpu0(pVM);
    361 
    362     int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    363     if (RT_SUCCESS(rc))
    364         return VINF_SUCCESS;
    365 
    366     /** @todo use different fallback?    */
    367     rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    368     AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    369     if (rc == VINF_SUCCESS)
    370     {
    371         rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    372         AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
    373     }
    374     return rc;
    375 }
    376 
    377 /**
    378  * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
    379  *
    380  * @returns VBox status code (appropriate for trap handling and GC return).
    381  * @param   pVM         Pointer to the VM.
    382  * @param   pVCpu       Pointer to the cross context CPU context for the
    383  *                      calling EMT.
    384  * @param   uErrorCode  CPU Error code.
    385  * @param   pRegFrame   Trap register frame.
    386  * @param   pvFault     The fault address (cr2).
    387  * @param   pvRange     The base address of the handled virtual range.
    388  * @param   offRange    The offset of the access into this range.
    389  *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
    390  * @param   pvUser      Unused.
    391  */
    392 DECLEXPORT(int) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    393                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    394 {
    395     LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    396     NOREF(pvRange); NOREF(pvUser);
    397 
    398     /*
    399      * Try emulate the access.
    400      */
    401     uint32_t cb;
    402     VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    403     if (   RT_SUCCESS(rcStrict)
    404         && cb)
    405     {
    406         rcStrict = VINF_SUCCESS;
    407 
    408         /*
    409          * If it's on the same page as the esp0 and ss0 fields or actually one of them,
    410          * then check if any of these has changed.
    411          */
    412         PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
    413         if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
    414             &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
    415             &&  (    pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
    416                  ||  pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    417            )
    418         {
    419             Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    420                  (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
    421             pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
    422             pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
    423             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    424         }
    425 #ifdef VBOX_WITH_RAW_RING1
    426         else if (    EMIsRawRing1Enabled(pVM)
    427                  &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
    428                  &&  PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
    429                  &&  (    pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
    430                       ||  pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
    431                 )
    432         {
    433             Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    434                  (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
    435             pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
    436             pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
    437             STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    438         }
    439 #endif
    440         /* Handle misaligned TSS in a safe manner (just in case). */
    441         else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
    442                  && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
    443         {
    444             struct
    445             {
    446                 uint32_t esp0;
    447                 uint16_t ss0;
    448                 uint16_t padding_ss0;
    449             } s;
    450             AssertCompileSize(s, 8);
    451             rcStrict = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
    452             if (    rcStrict == VINF_SUCCESS
    453                 &&  (    s.esp0 !=  pVM->selm.s.Tss.esp1
    454                      ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    455                )
    456             {
    457                 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
    458                      (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
    459                 pVM->selm.s.Tss.esp1 = s.esp0;
    460                 pVM->selm.s.Tss.ss1  = s.ss0 | 1;
    461                 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    462             }
    463         }
    464 
    465         /*
    466          * If VME is enabled we need to check if the interrupt redirection bitmap
    467          * needs updating.
    468          */
    469         if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
    470             &&  (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
    471         {
    472             if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
    473             {
    474                 uint16_t offIoBitmap = pGuestTss->offIoBitmap;
    475                 if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
    476                 {
    477                     Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    478                     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    479                     VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    480                 }
    481                 else
    482                     Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    483             }
    484             else
    485             {
    486                 /** @todo not sure how the partial case is handled; probably not allowed */
    487                 uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
    488                 if (   offIntRedirBitmap <= offRange
    489                     && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
    490                     && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
    491                 {
    492                     Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
    493                          pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));
    494 
    495                     /** @todo only update the changed part. */
    496                     for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
    497                     {
    498                         rcStrict = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
    499                                                      (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
    500                         if (rcStrict != VINF_SUCCESS)
    501                             break;
    502                     }
    503                     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
    504                 }
    505             }
    506         }
    507 
    508         /* Return to ring-3 for a full resync if any of the above fails... (?) */
    509         if (rcStrict != VINF_SUCCESS)
    510         {
    511             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    512             VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    513             if (RT_SUCCESS(rcStrict))
    514                 rcStrict = VINF_SUCCESS;
    515         }
    516 
    517         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    518     }
    519     else
    520     {
    521         AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
    522         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    523         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
    524         if (rcStrict == VERR_EM_INTERPRETER)
    525             rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    526     }
    527     return VBOXSTRICTRC_TODO(rcStrict);
    528 }
    529 #endif /* SELM_TRACK_GUEST_TSS_CHANGES */
    530 
    531 
    532 #ifdef SELM_TRACK_SHADOW_GDT_CHANGES
    533 /**
    534  * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
     571#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
     572/**
     573 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
    535574 *
    536575 * @returns VBox status code (appropriate for trap handling and GC return).
     
    546585 * @param   pvUser      Unused.
    547586 */
    548 DECLEXPORT(int) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    549                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    550 {
    551     LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     587DECLEXPORT(VBOXSTRICTRC) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     588                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     589{
     590    LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     591    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    552592    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    553     return VERR_SELM_SHADOW_GDT_WRITE;
     593    return VERR_SELM_SHADOW_LDT_WRITE;
    554594}
    555595#endif
    556596
    557597
    558 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES
    559 /**
    560  * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
     598#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
     599/**
     600 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
    561601 *
    562602 * @returns VBox status code (appropriate for trap handling and GC return).
     
    572612 * @param   pvUser      Unused.
    573613 */
    574 DECLEXPORT(int) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    575                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    576 {
    577     LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    578     Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    579     NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    580     return VERR_SELM_SHADOW_LDT_WRITE;
    581 }
    582 #endif
    583 
    584 
    585 #ifdef SELM_TRACK_SHADOW_TSS_CHANGES
    586 /**
    587  * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
    588  *
    589  * @returns VBox status code (appropriate for trap handling and GC return).
    590  * @param   pVM         Pointer to the VM.
    591  * @param   pVCpu       Pointer to the cross context CPU context for the
    592  *                      calling EMT.
    593  * @param   uErrorCode   CPU Error code.
    594  * @param   pRegFrame   Trap register frame.
    595  * @param   pvFault     The fault address (cr2).
    596  * @param   pvRange     The base address of the handled virtual range.
    597  * @param   offRange    The offset of the access into this range.
    598  *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
    599  * @param   pvUser      Unused.
    600  */
    601 DECLEXPORT(int) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    602                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     614DECLEXPORT(VBOXSTRICTRC) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     615                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    603616{
    604617    LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
  • trunk/src/VBox/VMM/VMMRC/TRPMRC.cpp

    r55900 r56013  
    105105 * @param   pvUser      Unused.
    106106 */
    107 DECLEXPORT(int) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    108                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     107DECLEXPORT(VBOXSTRICTRC) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     108                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    109109{
    110110    uint16_t    cbIDT;
     
    171171 * @param   pvUser      Unused.
    172172 */
    173 DECLEXPORT(int) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
    174                                               RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
     173DECLEXPORT(VBOXSTRICTRC) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
     174                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
    175175{
    176176    LogRel(("FATAL ERROR: trpmRCShadowIDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%08RGv\r\n", pRegFrame->eip, pvFault, pvRange));
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette