VirtualBox

Ignore:
Timestamp:
May 13, 2019 9:52:54 AM (6 years ago)
Author:
vboxsync
Message:

VMM/HM, IEM: Nested VMX: bugref:9180 Hardware-assisted VMX VM-exit handling interface bits, I/O exit handling, comments and disabled code on what needs to be done with future optimizations.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r78371 r78481  
    76157615                    }
    76167616                }
     7617
     7618#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     7619# if 0
     7620                /** @todo NSTVMX: We handle each of these fields individually by passing it to IEM
     7621                 *        VM-exit handlers. We might handle it differently when using the fast path. */
     7622                /*
     7623                 * The hardware virtualization state currently consists of VMCS fields that may be
     7624                 * modified by execution of the nested-guest (that are not part of the general
     7625                 * guest state) and is visible to guest software. Hence, it is technically part of
     7626                 * the guest-CPU state when executing a nested-guest.
     7627                 */
     7628                if (   (fWhat & CPUMCTX_EXTRN_HWVIRT)
     7629                    && CPUMIsGuestInVmxNonRootMode(pCtx))
     7630                {
     7631                    PVMXVVMCS pGstVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
     7632                    rc  = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON,        &pGstVmcs->u32RoExitReason);
     7633                    rc |= VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pGstVmcs->u64RoExitQual.u);
     7634                    VMXLOCAL_BREAK_RC(rc);
     7635
     7636                    /*
     7637                     * VM-entry can fail due to invalid-guest state, machine-check events and
     7638                     * MSR loading failures. Other than VM-exit reason and VM-exit qualification
     7639                     * all other VMCS fields are left unmodified on VM-entry failure.
     7640                     *
     7641                     * See Intel spec. 26.7 "VM-entry Failures During Or After Loading Guest State".
     7642                     */
     7643                    bool const fEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(pGstVmcs->u32RoExitReason);
     7644                    if (!fEntryFailed)
     7645                    {
     7646                        /*
     7647                         * Some notes on VMCS fields that may need importing when the fast path
     7648                         * is implemented. Currently we fully emulate VMLAUNCH/VMRESUME in IEM.
     7649                         *
     7650                         * Requires fixing up when using hardware-assisted VMX:
     7651                         *   - VM-exit interruption info: Shouldn't reflect host interrupts/NMIs.
     7652                         *   - VM-exit interruption error code: Cleared to 0 when not appropriate.
     7653                         *   - IDT-vectoring info: Think about this.
     7654                         *   - IDT-vectoring error code: Think about this.
     7655                         *
     7656                         * Emulated:
     7657                         *   - Guest-interruptibility state: Derived from FFs and RIP.
     7658                         *   - Guest pending debug exceptions: Derived from DR6.
     7659                         *   - Guest activity state: Emulated from EM state.
     7660                         *   - Guest PDPTEs: Currently all 0s since we don't support nested EPT.
     7661                         *   - Entry-interrupt info: Emulated, cleared to 0.
     7662                         */
     7663                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,       &pGstVmcs->u32RoExitIntInfo);
     7664                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pGstVmcs->u32RoExitIntErrCode);
     7665                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO,           &pGstVmcs->u32RoIdtVectoringInfo);
     7666                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,     &pGstVmcs->u32RoIdtVectoringErrCode);
     7667                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH,            &pGstVmcs->u32RoExitInstrLen);
     7668                        rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO,              &pGstVmcs->u32RoExitIntInfo);
     7669                        rc |= VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,         &pGstVmcs->u64RoGuestPhysAddr.u);
     7670                        rc |= VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR,            &pGstVmcs->u64RoGuestLinearAddr.u);
     7671                        /** @todo NSTVMX: Save and adjust preemption timer value. */
     7672                    }
     7673
     7674                    VMXLOCAL_BREAK_RC(rc);
     7675                }
     7676# endif
     7677#endif
    76177678            }
    76187679        } while (0);
     
    1066910730     * Check if VMLAUNCH/VMRESUME succeeded.
    1067010731     * If this failed, we cause a guru meditation and cease further execution.
     10732     *
     10733     * However, if we are executing a nested-guest, we might fail if we use the
     10734     * fast path rather than fully emulating VMLAUNCH/VMRESUME instruction in IEM.
    1067110735     */
    1067210736    if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
     
    1073410798        }
    1073510799    }
     10800#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     10801    else if (pVmxTransient->fIsNestedGuest)
     10802    {
     10803# if 0
     10804        /*
     10805         * Copy the VM-instruction error field to the guest VMCS.
     10806         */
     10807        /** @todo NSTVMX: Verify we're using the fast path. */
     10808        uint32_t u32RoVmInstrError;
     10809        rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &u32RoVmInstrError);
     10810        AssertRCReturn(rc, rc);
     10811        PVMXVVMCS pGstVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     10812        pGstVmcs->u32RoVmInstrError = u32RoVmInstrError;
     10813        /** @todo NSTVMX: Advance guest RIP and other fast path related restoration.  */
     10814# else
     10815        AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));
     10816# endif
     10817    }
     10818#endif
    1073610819    else
    1073710820        Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
     
    1243312516DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1243412517{
     12518    VBOXSTRICTRC   rcStrict = VINF_SUCCESS;
    1243512519    uint32_t const rcReason = pVmxTransient->uExitReason;
    1243612520    switch (rcReason)
    1243712521    {
    1243812522        case VMX_EXIT_EPT_MISCONFIG:
     12523            rcStrict = hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient);
     12524            break;
     12525
    1243912526        case VMX_EXIT_EPT_VIOLATION:
     12527            rcStrict = hmR0VmxExitEptViolation(pVCpu, pVmxTransient);
     12528            break;
     12529
    1244012530        case VMX_EXIT_IO_INSTR:
     12531        {
     12532            int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     12533            AssertRCReturn(rc, rc);
     12534
     12535            uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
     12536            uint8_t  const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
     12537            AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
     12538
     12539            /* Size of the I/O accesses in bytes. */
     12540            static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };
     12541            uint8_t const cbAccess = s_aIOSizes[uIOSize];
     12542
     12543            if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
     12544            {
     12545                rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     12546                AssertRCReturn(rc, rc);
     12547
     12548                VMXVEXITINFO ExitInfo;
     12549                RT_ZERO(ExitInfo);
     12550                ExitInfo.uReason = pVmxTransient->uExitReason;
     12551                ExitInfo.cbInstr = pVmxTransient->cbInstr;
     12552                ExitInfo.u64Qual = pVmxTransient->uExitQual;
     12553                rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
     12554            }
     12555            else
     12556                rcStrict = hmR0VmxExitIoInstr(pVCpu, pVmxTransient);
     12557            break;
     12558        }
     12559
    1244112560        case VMX_EXIT_CPUID:
    1244212561        case VMX_EXIT_RDTSC:
     
    1250312622            return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
    1250412623    }
    12505 #undef VMEXIT_CALL_RET
     12624
     12625    return rcStrict;
    1250612626}
    1250712627#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     
    1408314203
    1408414204    /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
    14085     uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
    14086     uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
    14087     bool     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    14088     bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    14089     bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    14090     bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
    14091     AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
     14205    uint32_t const uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
     14206    uint8_t  const uIOSize      = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
     14207    bool     const fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
     14208    bool     const fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
     14209    bool     const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
     14210    bool     const fDbgStepping = pVCpu->hm.s.fSingleInstruction;
     14211    AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
    1409214212
    1409314213    /*
     
    1411214232        static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                    /* Size of the I/O accesses. */
    1411314233        static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };   /* AND masks for saving result in AL/AX/EAX. */
    14114         uint32_t const cbValue  = s_aIOSizes[uIOWidth];
     14234        uint32_t const cbValue  = s_aIOSizes[uIOSize];
    1411514235        uint32_t const cbInstr  = pVmxTransient->cbInstr;
    1411614236        bool fUpdateRipAlready  = false; /* ugly hack, should be temporary. */
     
    1416114281             */
    1416214282            Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    14163             uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
     14283            uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
    1416414284            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
    1416514285            if (fIOWrite)
     
    1428814408              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    1428914409              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
    14290               fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
     14410              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
    1429114411
    1429214412        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette