VirtualBox

Changeset 74584 in vbox for trunk/src


Ignore:
Timestamp:
Oct 2, 2018 3:19:29 PM (6 years ago)
Author:
vboxsync
Message:

VMM: Don't access the guest TSS directly in selmRCGuestTssPostWriteCheck as EMInterpretInstructionEx no longer ensures the TSS page is loaded into the shadow page tables (IEM change). ticketref:18008

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMRC/SELMRC.cpp

    r69111 r74584  
    402402
    403403    /*
    404      * If it's on the same page as the esp0 and ss0 fields or actually one of them,
    405      * then check if any of these has changed.
    406      */
    407 /** @todo just read the darn fields and put them on the stack. */
     404     * Check if the ring-0 and/or ring-1 stacks have been changed and,
     405     * if so, synchronize our ring-compressed copies of the stacks.
     406     */
     407    struct
     408    {
     409        uint32_t esp;
     410        uint16_t ss;
     411        uint16_t padding_ss;
     412    } s;
     413    AssertCompileSize(s, 8);
    408414    PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
    409     if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
    410         && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
    411         && (   pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
    412             || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    413        )
    414     {
    415         Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    416              (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
    417         pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
    418         pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
    419         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    420     }
    421 # ifdef VBOX_WITH_RAW_RING1
    422     else if (   EMIsRawRing1Enabled(pVM)
    423              && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
    424              && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
    425              && (   pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
    426                  || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
    427             )
    428     {
    429         Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
    430              (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
    431         pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
    432         pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
    433         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    434     }
    435 # endif
    436     /* Handle misaligned TSS in a safe manner (just in case). */
    437     else if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, esp0)
    438              && offGuestTss < RT_UOFFSETOF(VBOXTSS, padding_ss0))
    439     {
    440         struct
    441         {
    442             uint32_t esp0;
    443             uint16_t ss0;
    444             uint16_t padding_ss0;
    445         } s;
    446         AssertCompileSize(s, 8);
     415    if (   offGuestTss           < RT_UOFFSET_AFTER(VBOXTSS, ss0)
     416        && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, esp0))
     417    {
    447418        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
    448419        if (   rcStrict == VINF_SUCCESS
    449             && (    s.esp0 !=  pVM->selm.s.Tss.esp1
    450                 ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
    451            )
    452         {
    453             Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
    454                  (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
    455             pVM->selm.s.Tss.esp1 = s.esp0;
    456             pVM->selm.s.Tss.ss1  = s.ss0 | 1;
     420            && (   s.esp !=  pVM->selm.s.Tss.esp1
     421                || s.ss  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */)
     422        {
     423            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     424                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss, (RTGCPTR)s.esp));
     425            pVM->selm.s.Tss.esp1 = s.esp;
     426            pVM->selm.s.Tss.ss1  = s.ss | 1;
    457427            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
    458428        }
    459429    }
     430# ifdef VBOX_WITH_RAW_RING1
     431    if (   EMIsRawRing1Enabled(pVM)
     432        && offGuestTss           < RT_UOFFSET_AFTER(VBOXTSS, ss1)
     433        && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, esp1)
     434        && rcStrict == VINF_SUCCESS)
     435    {
     436        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp1, sizeof(s));
     437        if (   rcStrict == VINF_SUCCESS
     438            && (   s.esp !=  pVM->selm.s.Tss.esp2
     439                || s.ss  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */)
     440        {
     441
     442            Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
     443                 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)s.ss, (RTGCPTR)s.esp));
     444            pVM->selm.s.Tss.esp2 = s.esp;
     445            pVM->selm.s.Tss.ss2  = (s.ss & ~1) | 2;
     446            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
     447        }
     448    }
     449# endif
    460450
    461451    /*
     
    464454     */
    465455    if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
    466         && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
    467     {
    468         if (offGuestTss - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
    469         {
    470             uint16_t offIoBitmap = pGuestTss->offIoBitmap;
    471             if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
     456        && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME)
     457        && rcStrict == VINF_SUCCESS)
     458    {
     459        if (   offGuestTss           < RT_UOFFSET_AFTER(VBOXTSS, offIoBitmap)
     460            && offGuestTss + cbWrite > RT_UOFFSETOF(VBOXTSS, offIoBitmap))
     461        {
     462            uint16_t offIoBitmap = 0;
     463            rcStrict = selmRCReadTssBits(pVM, pVCpu, &offIoBitmap, &pGuestTss->offIoBitmap, sizeof(offIoBitmap));
     464            if (   rcStrict != VINF_SUCCESS
     465                || offIoBitmap != pVM->selm.s.offGuestIoBitmap)
    472466            {
    473467                Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
     
    478472                Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
    479473        }
    480         else
     474
     475        if (   rcStrict == VINF_SUCCESS
     476            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS)
     477            && pVM->selm.s.offGuestIoBitmap != 0)
    481478        {
    482479            /** @todo not sure how the partial case is handled; probably not allowed */
    483480            uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
    484             if (   offIntRedirBitmap <= offGuestTss
    485                 && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offGuestTss + cbWrite
     481            if (   offGuestTss           < offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap)
     482                && offGuestTss + cbWrite > offIntRedirBitmap
    486483                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
    487484            {
     
    505502        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    506503        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    507         if (RT_SUCCESS(rcStrict))
     504        if (RT_SUCCESS(rcStrict) || rcStrict == VERR_ACCESS_DENIED)
    508505            rcStrict = VINF_SUCCESS;
    509506    }
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette