VirtualBox

Changeset 72881 in vbox


Ignore:
Timestamp:
Jul 4, 2018 3:19:39 PM (7 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
123426
Message:

EM,HM,IEM: Replaced EMInterpretRdmsr with IEMExecDecodedRdmsr.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r72878 r72881  
    66726672 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
    66736673 *
    6674  * @returns VBox status code.
     6674 * @returns Strict VBox status code.
    66756675 * @param   pVCpu       The cross context virtual CPU structure.
    66766676 * @param   pVmcb       Pointer to the VM control block.
    66776677 */
    6678 static int hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb)
     6678static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb)
    66796679{
    66806680    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    6681     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu,   CPUMCTX_EXTRN_CR0
    6682                                       | CPUMCTX_EXTRN_RFLAGS
    6683                                       | CPUMCTX_EXTRN_SS
    6684                                       | CPUMCTX_EXTRN_ALL_MSRS);
    6685 
    66866681    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
    66876682    Log4Func(("idMsr=%#RX32\n", pCtx->ecx));
    66886683
     6684    VBOXSTRICTRC rcStrict;
    66896685    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
    66906686    if (fSupportsNextRipSave)
    66916687    {
    6692         int rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    6693         if (RT_LIKELY(rc == VINF_SUCCESS))
    6694         {
    6695             pCtx->rip = pVmcb->ctrl.u64NextRIP;
    6696             HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    6697             return VINF_SUCCESS;
    6698         }
    6699 
    6700         AssertMsg(   rc == VERR_EM_INTERPRETER
    6701                   || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretRdmsr failed rc=%Rrc\n", rc));
    6702         return rc;
    6703     }
    6704 
    6705     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    6706     int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
    6707     if (RT_UNLIKELY(rc != VINF_SUCCESS))
    6708     {
    6709         AssertMsg(   rc == VERR_EM_INTERPRETER
    6710                   || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretInstruction failed rc=%Rrc\n", rc));
    6711     }
    6712     /* RIP updated by EMInterpretInstruction(). */
    6713     HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    6714     return rc;
     6688        /** @todo Optimize this: Only retrieve the MSR bits we need here. CPUMAllMsrs.cpp
     6689         *  can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
     6690        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     6691        rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
     6692        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     6693            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);     /* RIP updated by IEMExecDecodedRdmsr(). */
     6694        else
     6695            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
     6696                      || rcStrict == VINF_CPUM_R3_MSR_READ,
     6697                      ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     6698    }
     6699    else
     6700    {
     6701        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     6702        rcStrict = IEMExecOne(pVCpu);
     6703        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     6704            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);     /* RIP updated by IEMExecOne(). */
     6705        else
     6706            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
     6707                      || rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecOne status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     6708    }
     6709    return rcStrict;
    67156710}
    67166711
     
    67196714 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
    67206715 *
    6721  * @returns VBox status code.
     6716 * @returns Strict VBox status code.
    67226717 * @param   pVCpu           The cross context virtual CPU structure.
    67236718 * @param   pVmcb       Pointer to the VM control block.
    67246719 * @param   pSvmTransient   Pointer to the SVM-transient structure.
    67256720 */
    6726 static int hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
     6721static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
    67276722{
    67286723    PCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
    67296724    uint32_t const idMsr = pCtx->ecx;
    6730     /** @todo Optimize this: We don't need to get much of the MSR state here
    6731      * since we're only updating.  CPUMAllMsrs.cpp can ask for what it needs and
    6732      * clear the applicable extern flags. */
    6733     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu,   CPUMCTX_EXTRN_CR0
    6734                                       | CPUMCTX_EXTRN_RFLAGS
    6735                                       | CPUMCTX_EXTRN_SS
    6736                                       | CPUMCTX_EXTRN_ALL_MSRS
    6737                                       | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    6738 
    67396725    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
    67406726    Log4Func(("idMsr=%#RX32\n", idMsr));
     
    67686754    if (fSupportsNextRipSave)
    67696755    {
     6756        /** @todo Optimize this: We don't need to get much of the MSR state here
     6757         * since we're only updating.  CPUMAllMsrs.cpp can ask for what it needs and
     6758         * clear the applicable extern flags. */
     6759        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
    67706760        rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
    67716761        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    6772             HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
     6762            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);     /* RIP updated by IEMExecDecodedWrmsr(). */
    67736763        else
    67746764            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
     
    67786768    else
    67796769    {
    6780         HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     6770        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
    67816771        rcStrict = IEMExecOne(pVCpu);
    67826772        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    6783             HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);     /* RIP updated by EMInterpretInstruction(). */
     6773            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);     /* RIP updated by IEMExecOne(). */
    67846774        else
    67856775            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
     
    68156805    }
    68166806
    6817     return VBOXSTRICTRC_TODO(rcStrict);
     6807    return rcStrict;
    68186808}
    68196809
     
    68296819    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
    68306820    if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
    6831         return hmR0SvmExitReadMsr(pVCpu, pVmcb);
     6821        return VBOXSTRICTRC_TODO(hmR0SvmExitReadMsr(pVCpu, pVmcb));
    68326822
    68336823    Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
    6834     return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
     6824    return VBOXSTRICTRC_TODO(hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient));
    68356825}
    68366826
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72878 r72881  
    1183911839    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    1184011840
    11841     /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. FS, GS (base) can be accessed by MSR reads. */
    11842     int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
    11843                                             | CPUMCTX_EXTRN_RFLAGS
    11844                                             | CPUMCTX_EXTRN_SS
    11845                                             | CPUMCTX_EXTRN_FS
    11846                                             | CPUMCTX_EXTRN_GS);
    11847     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    11848         rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
     11841    /** @todo Optimize this: We currently drag in the whole MSR state
     11842     * (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get
     11843     * MSRs required.  That would require changes to IEM and possibly CPUM too.
     11844     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
     11845    uint32_t const idMsr = pMixedCtx->ecx;  NOREF(idMsr); /* Save it. */
     11846    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     11847    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
    1184911848    AssertRCReturn(rc, rc);
    11850     Log4Func(("ecx=%#RX32\n", pMixedCtx->ecx));
     11849
     11850    Log4Func(("ecx=%#RX32\n", idMsr));
    1185111851
    1185211852#ifdef VBOX_STRICT
    1185311853    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    1185411854    {
    11855         if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
    11856             && pMixedCtx->ecx != MSR_K6_EFER)
    11857         {
    11858             AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
    11859                              pMixedCtx->ecx));
     11855        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
     11856            && idMsr != MSR_K6_EFER)
     11857        {
     11858            AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
    1186011859            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1186111860        }
    11862         if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
     11861        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    1186311862        {
    1186411863            VMXMSREXITREAD  enmRead;
    1186511864            VMXMSREXITWRITE enmWrite;
    11866             int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
     11865            int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
    1186711866            AssertRCReturn(rc2, rc2);
    1186811867            if (enmRead == VMXMSREXIT_PASSTHRU_READ)
    1186911868            {
    11870                 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
     11869                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
    1187111870                HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1187211871            }
     
    1187511874#endif
    1187611875
    11877     PVM pVM = pVCpu->CTX_SUFF(pVM);
    11878     rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
    11879     AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
    11880               ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
     11876    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
    1188111877    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
    11882     if (RT_SUCCESS(rc))
    11883     {
    11884         rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    11885         Assert(pVmxTransient->cbInstr == 2);
    11886     }
    11887     return rc;
     11878    AssertMsg(   rcStrict == VINF_SUCCESS
     11879              || rcStrict == VINF_CPUM_R3_MSR_READ
     11880              || rcStrict == VINF_IEM_RAISED_XCPT,
     11881              ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     11882
     11883    return rcStrict;
    1188811884}
    1188911885
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette