VirtualBox

Changeset 75510 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Nov 16, 2018 8:36:57 AM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 Try to unify signalling of TPR/EOI/Self-IPI virtualization/APIC-write emulation operations. Also remove
the duplicate handling of virtualization of x2APIC MSR accesses (as distinct from MSR intercepts). Currently we handle it in CPUM,
and IEM calls into CPUM so it should be handled fine, no need to handle it in IEM too.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r75507 r75510  
    59095909                uint32_t const uVTpr = (uNewCrX & 0xf) << 4;
    59105910                iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uVTpr);
    5911                 rcStrict = iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
    5912                 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    5913                     return rcStrict;
    5914                 rcStrict = VINF_SUCCESS;
     5911                iemVmxVirtApicSignalAction(pVCpu, XAPIC_OFF_TPR);
    59155912                break;
    59165913            }
     
    66936690        if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
    66946691            IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
    6695 
    6696         /** @todo NSTVMX: Handle other x2APIC MSRs in VMX non-root mode. Perhaps having a
    6697          *        dedicated virtual-APIC device might be better... */
    6698         if (   pVCpu->cpum.GstCtx.ecx == MSR_IA32_X2APIC_TPR
    6699             && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
    6700         {
    6701             uint32_t const uVTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
    6702             pVCpu->cpum.GstCtx.rax = uVTpr;
    6703             pVCpu->cpum.GstCtx.rdx = 0;
    6704             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    6705             return VINF_SUCCESS;
    6706         }
    67076692    }
    67086693#endif
     
    67796764    uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
    67806765
     6766    uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
     6767
    67816768    /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
    67826769    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
     
    67886775    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    67896776    {
    6790         if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, pVCpu->cpum.GstCtx.ecx))
     6777        if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
    67916778            IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
    6792 
    6793         /* Check x2APIC MSRs first. */
    6794         if (IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
    6795         {
    6796             switch (pVCpu->cpum.GstCtx.ecx)
    6797             {
    6798                 case MSR_IA32_X2APIC_TPR:
    6799                 {
    6800                     if (   !uValue.s.Hi
    6801                         && !(uValue.s.Lo & UINT32_C(0xffffff00)))
    6802                     {
    6803                         uint32_t const uVTpr = uValue.s.Lo;
    6804                         iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uVTpr);
    6805                         VBOXSTRICTRC rcStrict = iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
    6806                         if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    6807                             return rcStrict;
    6808                         return VINF_SUCCESS;
    6809                     }
    6810                     Log(("IEM: Invalid TPR MSR write (%#x,%#x) -> #GP(0)\n", uValue.s.Hi, uValue.s.Lo));
    6811                     return iemRaiseGeneralProtectionFault0(pVCpu);
    6812                 }
    6813 
    6814                 case MSR_IA32_X2APIC_EOI:
    6815                 case MSR_IA32_X2APIC_SELF_IPI:
    6816                 {
    6817                     /** @todo NSTVMX: EOI and Self-IPI virtualization. */
    6818                     break;
    6819                 }
    6820             }
    6821         }
    6822         else if (pVCpu->cpum.GstCtx.ecx == MSR_IA32_BIOS_UPDT_TRIG)
    6823         {
    6824             /** @todo NSTVMX: We must not allow any microcode updates in VMX non-root mode.
    6825              *        Since we don't implement this MSR anyway it's currently not a problem.
    6826              *        If we do, we should probably move this check to the MSR handler.  */
    6827         }
    6828         else if (pVCpu->cpum.GstCtx.ecx == MSR_IA32_RTIT_CTL)
    6829         {
    6830             /** @todo NSTVMX: We don't support Intel PT yet. When we do, this MSR must #GP
    6831              *        when IntelPT is not supported in VMX. */
    6832         }
    68336779    }
    68346780#endif
     
    68376783    if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
    68386784    {
    6839         VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, true /* fWrite */);
     6785        VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */);
    68406786        if (rcStrict == VINF_SVM_VMEXIT)
    68416787            return VINF_SUCCESS;
    68426788        if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    68436789        {
    6844             Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
     6790            Log(("IEM: SVM intercepted wrmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
    68456791            return rcStrict;
    68466792        }
     
    68516797     * Do the job.
    68526798     */
    6853     VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, uValue.u);
     6799    VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
    68546800    if (rcStrict == VINF_SUCCESS)
    68556801    {
     
    68626808    if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
    68636809    {
    6864         Log(("IEM: wrmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
     6810        Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
    68656811        return rcStrict;
    68666812    }
     
    68716817    {
    68726818        pVCpu->iem.s.cLogRelWrMsr++;
    6873         LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx, uValue.s.Hi, uValue.s.Lo));
     6819        LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
    68746820    }
    68756821    else
    6876         Log((   "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx, uValue.s.Hi, uValue.s.Lo));
     6822        Log((   "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
    68776823    AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
    68786824    return iemRaiseGeneralProtectionFault0(pVCpu);
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r75507 r75510  
    911911
    912912/**
     913 * Signal that a virtual-APIC action needs to be performed at a later time (post
     914 * instruction execution).
     915 *
     916 * @param   pVCpu       The cross context virtual CPU structure.
     917 * @param   offApic     The virtual-APIC page offset that was updated pertaining to
     918 *                      the event.
     919 */
     920DECLINLINE(void) iemVmxVirtApicSignalAction(PVMCPU pVCpu, uint16_t offApic)
     921{
     922    Assert(offApic < XAPIC_OFF_END + 4);
     923
     924    /*
     925     * Record the currently updated APIC offset, as we need this later for figuring
     926     * out whether to perform TPR, EOI or self-IPI virtualization, as well as
     927     * for supplying the exit qualification when causing an APIC-write VM-exit.
     928     */
     929    pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
     930
     931    /*
     932     * Signal that we need to perform a virtual-APIC action (TPR/PPR/EOI/Self-IPI
     933     * virtualization or APIC-write emulation).
     934     */
     935    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC))
     936        VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
     937}
     938
     939
     940/**
    913941 * Masks the nested-guest CR0/CR4 mask subjected to the corresponding guest/host
    914942 * mask and the read-shadow (CR0/CR4 read).
     
    43144342    {
    43154343        /*
    4316          * Record the currently updated APIC offset, as we need this later for figuring
    4317          * out whether to perform TPR, EOI or self-IPI virtualization as well as well
    4318          * as for supplying the exit qualification when causing an APIC-write VM-exit.
    4319          */
    4320         pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offAccess;
    4321 
    4322         /*
    43234344         * A write access to the APIC-access page that is virtualized (rather than
    43244345         * causing a VM-exit) writes data to the virtual-APIC page.
     
    43284349
    43294350        /*
     4351         * Record the currently updated APIC offset, as we need this later for figuring
     4352         * out whether to perform TPR, EOI or self-IPI virtualization, as well as
     4353         * for supplying the exit qualification when causing an APIC-write VM-exit.
     4354         *
    43304355         * After completion of the current operation, we need to perform TPR virtualization,
    43314356         * EOI virtualization or APIC-write VM-exit depending on which register was written.
     
    43394364         * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
    43404365         */
    4341         VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
     4366        iemVmxVirtApicSignalAction(pVCpu, offAccess);
    43424367    }
    43434368    else
     
    44234448 * @retval  VERR_OUT_RANGE if the MSR read was supposed to be virtualized but was
    44244449 *          not within the range of valid MSRs, caller must raise \#GP(0).
     4450 * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
    44254451 *
    44264452 * @param   pVCpu       The cross context virtual CPU structure.
     
    44694495         * as for supplying the exit qualification when causing an APIC-write VM-exit.
    44704496         */
    4471         pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offReg;
    4472         VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
     4497        iemVmxVirtApicSignalAction(pVCpu, offReg);
    44734498
    44744499        return VINF_VMX_MODIFIES_BEHAVIOR;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette