VirtualBox

Changeset 75387 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Nov 12, 2018 5:59:11 AM (6 years ago)
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:9180 Virtualized x2APIC MSR write access handling.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
3 edited
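
For orientation: with this change, a guest x2APIC MSR write is diverted into IEM whenever the nested guest runs in VMX non-root mode with the "virtualize x2APIC mode" secondary processor-based control set. IEM then virtualizes TPR writes (and EOI/self-IPI writes when virtual-interrupt delivery is also enabled) onto the virtual-APIC page, records the written offset and raises a force flag for later processing; everything else falls back to the regular x2APIC device emulation. The sketch below condenses that flow from the hunks further down; the wrapper name is hypothetical and the error handling is simplified.

    /* Condensed sketch of the new write path (hypothetical wrapper; the real code
       is in the CPUMAllMsrs.cpp and IEMAllCImplVmxInstr.cpp.h hunks below). */
    static VBOXSTRICTRC cpumX2ApicWriteMsrNested(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
    {
        if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
            && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
        {
            /* IEM decides whether the write is virtualized (TPR, or EOI/self-IPI when
               virtual-interrupt delivery is enabled), stores it on the virtual-APIC page,
               records offVirtApicWrite and sets VMCPU_FF_VMX_UPDATE_VAPIC. */
            VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, true /* fWrite */);
            if (rcStrict == VINF_SUCCESS)
                return VINF_SUCCESS;               /* The write was virtualized. */
            if (rcStrict == VERR_OUT_OF_RANGE)
                return VERR_CPUM_RAISE_GP_0;       /* Invalid MSR/value: caller raises #GP(0). */
            Assert(rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE);
        }
        return APICWriteMsr(pVCpu, idMsr, uValue); /* Not virtualized: normal x2APIC device path. */
    }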

Legend:

  +  Added line
  -  Removed line
     Unprefixed lines are unchanged context; "@@ -N +M @@" gives the starting line in the old and new revision.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r74648 → r75387)

    @@ -25 +25 @@
     #include <VBox/vmm/hm.h>
     #include <VBox/vmm/hm_vmx.h>
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    +# include <VBox/vmm/iem.h>
    +#endif
     #include <VBox/vmm/tm.h>
     #include <VBox/vmm/gim.h>

    @@ -1271 +1274 @@
     {
         RT_NOREF_PV(pRange);
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    +    if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
    +        && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
    +    {
    +        /** @todo NSTVMX: perhaps IEMExecVmxVirtApicAccessMsr should be moved to
    +         *        HMVMXAll.cpp? */
    +        VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, puValue, false /* fWrite */);
    +        Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_OUT_OF_RANGE || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE);
    +        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    +        {
    +            if (rcStrict == VERR_OUT_OF_RANGE)
    +                return VERR_CPUM_RAISE_GP_0;
    +            Assert(rcStrict == VINF_SUCCESS);
    +            return VINF_SUCCESS;
    +        }
    +    }
    +#endif
         return APICReadMsr(pVCpu, idMsr, puValue);
     }

    @@ -1279 +1299 @@
     {
         RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
    +#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    +    if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
    +        && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.s.Guest, VMX_PROC_CTLS2_VIRT_X2APIC_MODE))
    +    {
    +        /** @todo NSTVMX: perhaps IEMExecVmxVirtApicAccessMsr should be moved to
    +         *        HMVMXAll.cpp? */
    +        VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, true /* fWrite */);
    +        Assert(rcStrict == VINF_SUCCESS || rcStrict == VERR_OUT_OF_RANGE || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE);
    +        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    +        {
    +            if (rcStrict == VERR_OUT_OF_RANGE)
    +                return VERR_CPUM_RAISE_GP_0;
    +            Assert(rcStrict == VINF_SUCCESS);
    +            return VINF_SUCCESS;
    +        }
    +    }
    +#endif
         return APICWriteMsr(pVCpu, idMsr, uValue);
     }
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r75320 → r75387)

    @@ -989 +989 @@
     IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
     IEM_STATIC VBOXSTRICTRC     iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
    +IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
    +IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
     #endif

    @@ -15696 +15698 @@

     /**
    + * Interface for HM and EM to virtualize x2APIC MSR accesses.
    + *
    + * @returns Strict VBox status code.
    + * @retval  VINF_SUCCESS if the MSR access was virtualized.
    + * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
    + *          the x2APIC device.
    + * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
    + * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    + * @param   idMsr       The MSR being read.
    + * @param   pu64Value   Pointer to the value being written or where to store the
    + *                      value being read.
    + * @param   fWrite      Whether this is an MSR write or read access.
    + * @thread  EMT(pVCpu)
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
    +{
    +    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
    +    Assert(pu64Value);
    +
    +    VBOXSTRICTRC rcStrict;
    +    if (!fWrite)
    +        rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
    +    else
    +        rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
    +    if (pVCpu->iem.s.cActiveMappings)
    +        iemMemRollback(pVCpu);
    +    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    +}
    +
    +
    +/**
      * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
      *
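
Caller-side, the three possible outcomes of this new interface map onto the CPUM MSR handlers shown in the CPUMAllMsrs.cpp hunks above; a condensed read-direction sketch (the stand-alone framing is illustrative, not a new API):

    /* Illustrative use of IEMExecVmxVirtApicAccessMsr for a read access. */
    uint64_t     uValue;
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, false /* fWrite */);
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        rcStrict = APICReadMsr(pVCpu, idMsr, &uValue);  /* Not virtualized: query the x2APIC device. */
    else if (rcStrict == VERR_OUT_OF_RANGE)
        rcStrict = VERR_CPUM_RAISE_GP_0;                /* The caller must raise #GP(0). */
    /* Otherwise VINF_SUCCESS: uValue was read from the virtual-APIC page. */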
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r75320 → r75387)

    @@ -3976 +3976 @@

     /**
    + * Reads a 64-bit register from the virtual-APIC page at the given offset.
    + *
    + * @returns The register from the virtual-APIC page.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   offReg      The offset of the register being read.
    + */
    +DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
    +{
    +    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
    +    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    +    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    +    uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
    +    return uReg;
    +}
    +
    +
    +/**
      * Writes a 32-bit register to the virtual-APIC page at the given offset.
      *

    @@ -3988 +4005 @@
         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
         *(uint32_t *)(pbVirtApic + offReg) = uReg;
    +}
    +
    +
    +/**
    + * Writes a 64-bit register to the virtual-APIC page at the given offset.
    + *
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   offReg      The offset of the register being written.
    + * @param   uReg        The register value to write.
    + */
    +DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
    +{
    +    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
    +    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    +    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    +    *(uint64_t *)(pbVirtApic + offReg) = uReg;
     }

    @@ -4230 +4263 @@
             /*
              * Record the currently updated APIC offset, as we need this later for figuring
    -         * out what to do as well as the exit qualification when causing an APIC-write VM-exit.
    +         * out whether to perform TPR, EOI or self-IPI virtualization as well
    +         * as for supplying the exit qualification when causing an APIC-write VM-exit.
              */
             pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offAccess;

    @@ -4273 +4307 @@

         return VINF_VMX_MODIFIES_BEHAVIOR;
    +}
    +
    +
    +/**
    + * Virtualizes an MSR-based APIC read access.
    + *
    + * @returns VBox strict status code.
    + * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
    + * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
    + *          handled by the x2APIC device.
    + * @retval  VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but
    + *          was not within the range of valid MSRs, caller must raise \#GP(0).
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   idMsr       The x2APIC MSR being read.
    + * @param   pu64Value   Where to store the read x2APIC MSR value (only valid when
    + *                      VINF_VMX_MODIFIES_BEHAVIOR is returned).
    + */
    +IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
    +{
    +    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    +    Assert(pVmcs);
    +    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
    +    Assert(pu64Value);
    +
    +    if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
    +    {
    +        /*
    +         * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
    +         * what the end of the valid x2APIC MSR range is. Hence the use of different
    +         * macros here.
    +         *
    +         * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
    +         * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
    +         */
    +        if (   idMsr >= VMX_V_VIRT_APIC_MSR_START
    +            && idMsr <= VMX_V_VIRT_APIC_MSR_END)
    +        {
    +            uint16_t const offReg   = (idMsr & 0xff) << 4;
    +            uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
    +            *pu64Value = u64Value;
    +            return VINF_VMX_MODIFIES_BEHAVIOR;
    +        }
    +        return VERR_OUT_OF_RANGE;
    +    }
    +
    +    if (idMsr == MSR_IA32_X2APIC_TPR)
    +    {
    +        uint16_t const offReg   = (idMsr & 0xff) << 4;
    +        uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
    +        *pu64Value = u64Value;
    +        return VINF_VMX_MODIFIES_BEHAVIOR;
    +    }
    +
    +    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
    +}
    +
    +
    +/**
    + * Virtualizes an MSR-based APIC write access.
    + *
    + * @returns VBox strict status code.
    + * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
    + * @retval  VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but
    + *          was not within the range of valid MSRs, caller must raise \#GP(0).
    + *
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   idMsr       The x2APIC MSR being written.
    + * @param   u64Value    The value of the x2APIC MSR being written.
    + */
    +IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
    +{
    +    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    +    Assert(pVmcs);
    +
    +    /*
    +     * Check if the access is to be virtualized.
    +     * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
    +     */
    +    if (   idMsr == MSR_IA32_X2APIC_TPR
    +        || (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
    +            && (   idMsr == MSR_IA32_X2APIC_EOI
    +                || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
    +    {
    +        /* Validate the MSR write depending on the register. */
    +        switch (idMsr)
    +        {
    +            case MSR_IA32_X2APIC_TPR:
    +            case MSR_IA32_X2APIC_SELF_IPI:
    +            {
    +                if (u64Value & UINT64_C(0xffffffffffffff00))
    +                    return VERR_OUT_OF_RANGE;
    +                break;
    +            }
    +            case MSR_IA32_X2APIC_EOI:
    +            {
    +                if (u64Value != 0)
    +                    return VERR_OUT_OF_RANGE;
    +                break;
    +            }
    +        }
    +
    +        /* Write the MSR to the virtual-APIC page. */
    +        uint16_t const offReg = (idMsr & 0xff) << 4;
    +        iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
    +
    +        /*
    +         * Record the currently updated APIC offset, as we need this later for figuring
    +         * out whether to perform TPR, EOI or self-IPI virtualization as well
    +         * as for supplying the exit qualification when causing an APIC-write VM-exit.
    +         */
    +        pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offReg;
    +        VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
    +
    +        return VINF_VMX_MODIFIES_BEHAVIOR;
    +    }
    +
    +    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
     }
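
A note on the offset arithmetic used by both helpers: an x2APIC MSR index 0x800+N corresponds to the legacy xAPIC MMIO register at offset N*16, so (idMsr & 0xff) << 4 recovers the register offset on the virtual-APIC page. Worked values (standard Intel x2APIC MSR numbers; the compile-time checks below are merely illustrative):

    /* Illustrative worked examples of the MSR-index to APIC-page-offset mapping. */
    AssertCompile(((0x808 /* IA32_X2APIC_TPR      */ & 0xff) << 4) == 0x080);  /* TPR      at 0x080 */
    AssertCompile(((0x80B /* IA32_X2APIC_EOI      */ & 0xff) << 4) == 0x0B0);  /* EOI      at 0x0B0 */
    AssertCompile(((0x83F /* IA32_X2APIC_SELF_IPI */ & 0xff) << 4) == 0x3F0);  /* SELF IPI at 0x3F0 */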
