VirtualBox

Changeset 74603 in vbox for trunk/src


Ignore:
Timestamp:
Oct 4, 2018 6:07:20 AM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM, HM: Nested VMX: bugref:9180 VM-exit bits; LMSW intercept. Separated VINF_HM_INTERCEPT_NOT_ACTIVE into VMX and SVM
specific codes. Adjusted IEMExecDecodedLmsw to supply the additional memory operand parameter from the VMCS guest-linear address
field.

Location:
trunk/src/VBox/VMM
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r74565 r74603  
    54925492             */
    54935493            VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
    5494             if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5494            if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    54955495                return rcStrict0;
    54965496        }
     
    1507915079 *
    1508015080 * @returns Strict VBox status code.
    15081  * @param   pVCpu       The cross context virtual CPU structure.
    15082  * @param   cbInstr     The instruction length in bytes.
    15083  * @param   uValue      The value to load into CR0.
     15081 * @param   pVCpu           The cross context virtual CPU structure.
     15082 * @param   cbInstr         The instruction length in bytes.
     15083 * @param   uValue          The value to load into CR0.
     15084 * @param   GCPtrEffDst     The guest-linear address if the LMSW instruction has a
     15085 *                          memory operand. Otherwise pass NIL_RTGCPTR.
    1508415086 *
    1508515087 * @remarks In ring-0 not all of the state needs to be synced in.
    1508615088 */
    15087 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
     15089VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
    1508815090{
    1508915091    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    1509015092
    1509115093    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15092     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
     15094    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
    1509315095    Assert(!pVCpu->iem.s.cActiveMappings);
    1509415096    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r74592 r74603  
    57435743 *
    57445744 * @param   u16NewMsw       The new value.
    5745  */
    5746 IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
     5745 * @param   GCPtrEffDst     The guest-linear address of the source operand in case
     5746 *                          of a memory operand. For register operand, pass
     5747 *                          NIL_RTGCPTR.
     5748 */
     5749IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
    57475750{
    57485751    if (pVCpu->iem.s.uCpl != 0)
    57495752        return iemRaiseGeneralProtectionFault0(pVCpu);
    57505753    Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
     5754    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     5755
     5756#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     5757    /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
     5758    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
     5759    {
     5760        VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
     5761        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
     5762            return rcStrict;
     5763    }
     5764#else
     5765    RT_NOREF_PV(GCPtrEffDst);
     5766#endif
    57515767
    57525768    /*
    57535769     * Compose the new CR0 value and call common worker.
    57545770     */
    5755     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    5756     uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0     & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     5771    uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0  & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    57575772    uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    57585773    return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
     
    63706385        if (rcStrict == VINF_SVM_VMEXIT)
    63716386            return VINF_SUCCESS;
    6372         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6387        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    63736388        {
    63746389            Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
     
    64466461        if (rcStrict == VINF_SVM_VMEXIT)
    64476462            return VINF_SUCCESS;
    6448         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6463        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    64496464        {
    64506465            Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
     
    65266541        if (rcStrict == VINF_SVM_VMEXIT)
    65276542            return VINF_SUCCESS;
    6528         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6543        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    65296544        {
    65306545            Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
     
    66196634        if (rcStrict == VINF_SVM_VMEXIT)
    66206635            return VINF_SUCCESS;
    6621         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     6636        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    66226637        {
    66236638            Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h

    r74336 r74603  
    12141214        if (rcStrict == VINF_SVM_VMEXIT)
    12151215            return VINF_SUCCESS;
    1216         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1216        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    12171217        {
    12181218            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
     
    12851285        if (rcStrict == VINF_SVM_VMEXIT)
    12861286            return VINF_SUCCESS;
    1287         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1287        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    12881288        {
    12891289            Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
     
    14861486        if (rcStrict == VINF_SVM_VMEXIT)
    14871487            return VINF_SUCCESS;
    1488         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1488        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    14891489        {
    14901490            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
     
    15451545        if (rcStrict == VINF_SVM_VMEXIT)
    15461546            return VINF_SUCCESS;
    1547         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
     1547        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    15481548        {
    15491549            Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r74357 r74603  
    947947    }
    948948
    949     return VINF_HM_INTERCEPT_NOT_ACTIVE;
     949    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
    950950}
    951951
     
    997997     *        intercepts). */
    998998    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    999     return VINF_HM_INTERCEPT_NOT_ACTIVE;
     999    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
    10001000}
    10011001
     
    10611061        return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
    10621062    }
    1063     return VINF_HM_INTERCEPT_NOT_ACTIVE;
     1063    return VINF_SVM_INTERCEPT_NOT_ACTIVE;
    10641064}
    10651065
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74592 r74603  
    681681        case VMX_VMCS_RO_IO_RDI:
    682682        case VMX_VMCS_RO_IO_RIP:
    683         case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR:          return true;
     683        case VMX_VMCS_RO_GUEST_LINEAR_ADDR:               return true;
    684684
    685685        /* Guest-state fields. */
     
    26082608     * instruction execution.
    26092609     *
    2610      * In our implementation, all undefined fields are generally cleared (caller's
    2611      * responsibility).
     2610     * In our implementation in IEM, all undefined fields are generally cleared. However,
     2611     * if the caller supplies information (from say the physical CPU directly) it is
     2612     * then possible that the undefined fields are not cleared.
    26122613     *
    26132614     * See Intel spec. 27.2.1 "Basic VM-Exit Information".
     
    27552756
    27562757    return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
     2758}
     2759
     2760
     2761/**
     2762 * VMX VM-exit handler for VM-exits due to LMSW.
     2763 *
     2764 * @returns Strict VBox status code.
     2765 * @param   pVCpu           The cross context virtual CPU structure.
     2766 * @param   uGuestCr0       The current guest CR0.
     2767 * @param   pu16NewMsw      The machine-status word specified in LMSW's source
     2768 *                          operand. This will be updated depending on the VMX
     2769 *                          guest/host CR0 mask if LMSW is not intercepted.
     2770 * @param   GCPtrEffDst     The guest-linear address of the source operand in case
     2771 *                          of a memory operand. For register operand, pass
     2772 *                          NIL_RTGCPTR.
     2773 * @param   cbInstr         The instruction length (in bytes).
     2774 */
     2775IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
     2776                                              uint8_t cbInstr)
     2777{
     2778    /*
     2779     * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
     2780     *
     2781     * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
     2782     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
     2783     */
     2784    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2785    Assert(pVmcs);
     2786    Assert(pu16NewMsw);
     2787
     2788    bool fIntercept = false;
     2789    uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
     2790    uint32_t const fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
     2791
     2792    /*
     2793     * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
     2794     * CR0.PE case first, before the rest of the bits in the MSW.
     2795     *
     2796     * If CR0.PE is owned by the host and CR0.PE differs between the
     2797     * MSW (source operand) and the read-shadow, we must cause a VM-exit.
     2798     */
     2799    if (    (fGstHostMask & X86_CR0_PE)
     2800        &&  (*pu16NewMsw  & X86_CR0_PE)
     2801        && !(fReadShadow  & X86_CR0_PE))
     2802        fIntercept = true;
     2803
     2804    /*
     2805     * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
     2806     * bits differ between the MSW (source operand) and the read-shadow, we must
     2807     * cause a VM-exit.
     2808     */
     2809    uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     2810    if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
     2811        fIntercept = true;
     2812
     2813    if (fIntercept)
     2814    {
     2815        Log2(("lmsw: Guest intercept -> VM-exit\n"));
     2816
     2817        VMXVEXITINFO ExitInfo;
     2818        RT_ZERO(ExitInfo);
     2819        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
     2820        ExitInfo.cbInstr = cbInstr;
     2821
     2822        bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
     2823        if (fMemOperand)
     2824        {
     2825            Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
     2826            ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
     2827        }
     2828
     2829        ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER,  0) /* CR0 */
     2830                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,    VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
     2831                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP,   fMemOperand)
     2832                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
     2833
     2834        return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
     2835    }
     2836
     2837    /*
     2838     * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
     2839     * CR0 guest/host mask must be left unmodified.
     2840     *
     2841     * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
     2842     */
     2843    fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     2844    *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
     2845
     2846    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
    27572847}
    27582848
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r74155 r74603  
    547547    {
    548548        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    549         IEM_MC_BEGIN(1, 0);
    550         IEM_MC_ARG(uint16_t, u16Tmp, 0);
     549        IEM_MC_BEGIN(2, 0);
     550        IEM_MC_ARG(uint16_t, u16Tmp,                         0);
     551        IEM_MC_ARG_CONST(RTGCPTR,  GCPtrEffDst, NIL_RTGCPTR, 1);
    551552        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    552         IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
    553         IEM_MC_END();
    554     }
    555     else
    556     {
    557         IEM_MC_BEGIN(1, 1);
    558         IEM_MC_ARG(uint16_t, u16Tmp, 0);
    559         IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
     553        IEM_MC_CALL_CIMPL_2(iemCImpl_lmsw, u16Tmp, GCPtrEffDst);
     554        IEM_MC_END();
     555    }
     556    else
     557    {
     558        IEM_MC_BEGIN(2, 0);
     559        IEM_MC_ARG(uint16_t, u16Tmp,      0);
     560        IEM_MC_ARG(RTGCPTR,  GCPtrEffDst, 1);
    560561        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    561562        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    562563        IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
    563         IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
     564        IEM_MC_CALL_CIMPL_2(iemCImpl_lmsw, u16Tmp, GCPtrEffDst);
    564565        IEM_MC_END();
    565566    }
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r74545 r74603  
    7878#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT_32(5)
    7979#define HMVMX_READ_EXIT_INSTR_INFO               RT_BIT_32(6)
     80#define HMVMX_READ_GUEST_LINEAR_ADDR             RT_BIT_32(7)
    8081/** @} */
    8182
     
    263264    /** The VM-exit exit code qualification. */
    264265    uint64_t            uExitQual;
     266    /** The Guest-linear address. */
     267    uint64_t            uGuestLinearAddr;
    265268
    266269    /** The VM-exit interruption-information field. */
     
    724727
    725728/**
    726  * Reads the exit code qualification from the VMCS into the VMX transient
    727  * structure.
     729 * Reads the VM-exit Qualification from the VMCS into the VMX transient structure.
    728730 *
    729731 * @returns VBox status code.
     
    739741        AssertRCReturn(rc, rc);
    740742        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
     743    }
     744    return VINF_SUCCESS;
     745}
     746
     747
     748/**
     749 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
     750 *
     751 * @returns VBox status code.
     752 * @param   pVCpu           The cross context virtual CPU structure of the
     753 *                          calling EMT. (Required for the VMCS cache case.)
     754 * @param   pVmxTransient   Pointer to the VMX transient structure.
     755 */
     756DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     757{
     758    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
     759    {
     760        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr); NOREF(pVCpu);
     761        AssertRCReturn(rc, rc);
     762        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
    741763    }
    742764    return VINF_SUCCESS;
     
    1228212304        {
    1228312305            /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
    12284             rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
     12306            rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
     12307            AssertRCReturn(rc, rc);
     12308            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual),
     12309                                          pVmxTransient->uGuestLinearAddr);
    1228512310            AssertMsg(   rcStrict == VINF_SUCCESS
    1228612311                      || rcStrict == VINF_IEM_RAISED_XCPT
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette