VirtualBox

Changeset 75301 in vbox


Timestamp: Nov 7, 2018 10:28:57 AM
Author:    vboxsync
Message:

VMM: Nested VMX: bugref:9180 VM-exit bits; APIC-access and APIC-write infrastructure. Handling of pending APIC bits at the
instruction/event boundary is still to do.

Location: trunk
Files:    7 edited
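
For orientation, here is a minimal sketch (not part of the changeset) of how the new exit-qualification bit-fields added to hm_vmx.h are combined when an APIC access has to cause a VM-exit; it mirrors the iemVmxVmexitApicAccess code added to IEMAllCImplVmxInstr.cpp.h below. The helper name is made up for illustration, and it assumes IPRT's RT_BF_MAKE macro and the new definitions are in scope via hm_vmx.h:

    #include <VBox/vmm/hm_vmx.h>   /* VMX_BF_EXIT_QUAL_APIC_ACCESS_*, VMXAPICACCESS (as of this changeset) */

    /* Illustrative only: compose the Exit qualification recorded for an APIC-access
       VM-exit from the APIC-page offset that was accessed and the access type. */
    static uint64_t vmxMakeApicAccessExitQual(uint16_t offAccess, VMXAPICACCESS enmAccess)
    {
        return RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE,   enmAccess);
    }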

Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unchanged context, and '…' marks omitted lines.
  • trunk/include/VBox/vmm/cpumctx.h (r75107 → r75301)

                /** 0x398 - Guest TSC timestamp of VM-entry (used for VMX-preemption timer). */
                uint64_t                uVmentryTick;
-               /** 0x3a0 - Padding. */
-               uint8_t             abPadding[0x3f0 - 0x3a0];
+               /** 0x3a0 - Virtual-APIC write offset (until trap-like VM-exit). */
+               uint16_t                offVirtApicWrite;
+               /** 0x3a2 - Padding. */
+               uint8_t             abPadding[0x3f0 - 0x3a2];
            } vmx;
        } CPUM_UNION_NM(s);
…
AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uPrevPauseTick,         0x390);
AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.uVmentryTick,           0x398);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.offVirtApicWrite,       0x3a0);
AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0,           8);
AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0,     8);
  • trunk/include/VBox/vmm/hm_vmx.h (r74736 → r75301)

 * @{
 */
-/** Virtualize APIC access. */
+/** Virtualize APIC accesses. */
#define VMX_PROC_CTLS2_VIRT_APIC_ACCESS                         RT_BIT(0)
/** EPT supported/enabled. */
…
#define VMX_EXIT_QUAL_APIC_ACCESS_TYPE(a)                       (((a) & 0xf000) >> 12)
/* Rest reserved. */
+
+/** Bit fields for Exit qualification for APIC-access VM-exits. */
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET_SHIFT               0
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET_MASK                UINT64_C(0x0000000000000fff)
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE_SHIFT                 12
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE_MASK                  UINT64_C(0x000000000000f000)
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_RSVD_16_63_SHIFT           16
+#define VMX_BF_EXIT_QUAL_APIC_ACCESS_RSVD_16_63_MASK            UINT64_C(0xffffffffffff0000)
+RT_BF_ASSERT_COMPILE_CHECKS(VMX_BF_EXIT_QUAL_APIC_ACCESS_, UINT64_C(0), UINT64_MAX,
+                            (OFFSET, TYPE, RSVD_16_63));
/** @} */

…
 * @{
 */
-/** Linear read access. */
+/** Linear access for a data read during instruction execution. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_READ                        0
-/** Linear write access. */
+/** Linear access for a data write during instruction execution. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_WRITE                       1
-/** Linear instruction fetch access. */
+/** Linear access for an instruction fetch. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_INSTR_FETCH                 2
/** Linear read/write access during event delivery. */
…
/** Physical access for an instruction fetch or during instruction execution. */
#define VMX_APIC_ACCESS_TYPE_PHYSICAL_INSTR                     15
+
+/**
+ * APIC-access type.
+ */
+typedef enum
+{
+    VMXAPICACCESS_LINEAR_READ             = VMX_APIC_ACCESS_TYPE_LINEAR_READ,
+    VMXAPICACCESS_LINEAR_WRITE            = VMX_APIC_ACCESS_TYPE_LINEAR_WRITE,
+    VMXAPICACCESS_LINEAR_INSTR_FETCH      = VMX_APIC_ACCESS_TYPE_LINEAR_INSTR_FETCH,
+    VMXAPICACCESS_LINEAR_EVENT_DELIVERY   = VMX_APIC_ACCESS_TYPE_LINEAR_EVENT_DELIVERY,
+    VMXAPICACCESS_PHYSICAL_EVENT_DELIVERY = VMX_APIC_ACCESS_TYPE_PHYSICAL_EVENT_DELIVERY,
+    VMXAPICACCESS_PHYSICAL_INSTR          = VMX_APIC_ACCESS_TYPE_PHYSICAL_INSTR
+} VMXAPICACCESS;
+AssertCompileSize(VMXAPICACCESS, 4);
/** @} */

…
    /* VMLAUNCH/VMRESUME. */
    kVmxVDiag_Vmentry_AddrApicAccess,
+    kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic,
    kVmxVDiag_Vmentry_AddrEntryMsrLoad,
    kVmxVDiag_Vmentry_AddrExitMsrLoad,
  • trunk/include/VBox/vmm/vm.h (r75200 → r75301)

 *
 * Available VMCPU bits:
- *      14, 15, 33 to 63
+ *      11, 14, 15, 35 to 63
 *
 * @todo If we run low on VMCPU, we may consider merging the SELM bits
…
/** The bit number for VMCPU_FF_DBGF. */
#define VMCPU_FF_DBGF_BIT                   10
-/** Pending MTF (Monitor Trap Flag) event - Intel only.  */
-#define VMCPU_FF_MTF                        RT_BIT_64(VMCPU_FF_MTF_BIT)
-/** The bit number for VMCPU_FF_MTF. */
-#define VMCPU_FF_MTF_BIT                    11
/** This action forces the VM to service any pending updates to CR3 (used only
 *  by HM). */
…
/** VMX-preemption timer in effect. */
#define VMCPU_FF_VMX_PREEMPT_TIMER          RT_BIT_64(VMCPU_FF_VMX_PREEMPT_TIMER_BIT)
+/** Bit number for VMCPU_FF_VMX_PREEMPT_TIMER. */
#define VMCPU_FF_VMX_PREEMPT_TIMER_BIT      32
+/** Pending MTF (Monitor Trap Flag) event.  */
+#define VMCPU_FF_VMX_MTF                    RT_BIT_64(VMCPU_FF_VMX_MTF_BIT)
+/** The bit number for VMCPU_FF_VMX_MTF. */
+#define VMCPU_FF_VMX_MTF_BIT                33
+/** Virtual-APIC operation pending (VTPR, VEOI or APIC-write).  */
+#define VMCPU_FF_VMX_UPDATE_VAPIC           RT_BIT_64(VMCPU_FF_VMX_UPDATE_VAPIC_BIT)
+/** The bit number for VMCPU_FF_VMX_UPDATE_VTPR. */
+#define VMCPU_FF_VMX_UPDATE_VAPIC_BIT       34

  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r74696 → r75301)

    /* VMLAUNCH/VMRESUME. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccess           , "AddrApicAccess"            ),
+    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic , "AddrApicAccessEqVirtApic"  ),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrEntryMsrLoad         , "AddrEntryMsrLoad"          ),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrExitMsrLoad          , "AddrExitMsrLoad"           ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r75249 → r75301)

                 */
                uint32_t const uVTpr = (uNewCrX & 0xf) << 4;
-               iemVmxVirtApicWriteRaw32(pVCpu, uVTpr, XAPIC_OFF_TPR);
+               iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uVTpr);
                rcStrict = iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
                if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
…
                    {
                        uint32_t const uVTpr = uValue.s.Lo;
-                       iemVmxVirtApicWriteRaw32(pVCpu, uVTpr, XAPIC_OFF_TPR);
+                       iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uVTpr);
                        VBOXSTRICTRC rcStrict = iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
                        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r75214 → r75301)

/**
- * Reads a 32-bit register from the virtual-APIC page at the given offset.
- *
- * @returns The register from the virtual-APIC page.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   offReg      The offset of the register being read.
- */
-DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint8_t offReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
-    return uReg;
-}
-
-
-/**
- * Writes a 32-bit register to the virtual-APIC page at the given offset.
- *
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   uReg        The register value to write.
- * @param   offReg      The offset of the register being written.
- */
-DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint32_t uReg, uint8_t offReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    *(uint32_t *)(pbVirtApic + offReg) = uReg;
-}
-
-
-/**
 * Masks the nested-guest CR0/CR4 mask subjected to the corresponding guest/host
 * mask and the read-shadow (CR0/CR4 read).
…

    /* MTF should not be set outside VMX non-root mode. */
-    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_MTF));
+    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));

    /*
…

/**
+ * Reads a 32-bit register from the virtual-APIC page at the given offset.
+ *
+ * @returns The register from the virtual-APIC page.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being read.
+ */
+DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
+{
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
+    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
+    uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
+    return uReg;
+}
+
+
+/**
+ * Writes a 32-bit register to the virtual-APIC page at the given offset.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offReg      The offset of the register being written.
+ * @param   uReg        The register value to write.
+ */
+DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
+{
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
+    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
+    *(uint32_t *)(pbVirtApic + offReg) = uReg;
+}
+
+
+/**
+ * Checks if an access of the APIC page must cause an APIC-access VM-exit.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offAccess   The offset of the register being accessed.
+ * @param   cbAccess    The size of the access in bytes.
+ * @param   fAccess     The type of access (must be IEM_ACCESS_TYPE_READ or
+ *                      IEM_ACCESS_TYPE_WRITE).
+ */
+IEM_STATIC bool iemVmxVirtApicIsAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, uint32_t cbAccess, uint32_t fAccess)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
+
+    /*
+     * We must cause a VM-exit if any of the following are true:
+     *   - TPR shadowing isn't active.
+     *   - The access size exceeds 32-bits.
+     *   - The access is not contained within low 4 bytes of a 16-byte aligned offset.
+     *
+     * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
+     * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
+     */
+    if (   !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+        || cbAccess > sizeof(uint32_t)
+        || ((offAccess + cbAccess - 1) & 0xc)
+        || offAccess >= XAPIC_OFF_END + 4)
+        return true;
+
+    /*
+     * If the access is part of an operation where we have already
+     * virtualized a virtual TPR write, we must cause a VM-exit.
+     */
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC))
+        return true;
+
+    /*
+     * Check read accesses to the APIC-access page that cause VM-exits.
+     */
+    if (fAccess == IEM_ACCESS_TYPE_READ)
+    {
+        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+        {
+            /*
+             * With APIC-register virtualization, a read access to any of the
+             * following registers are virtualized. Accessing any other register
+             * causes a VM-exit.
+             */
+            uint16_t const offAlignedAccess = offAccess & 0xfffc;
+            switch (offAlignedAccess)
+            {
+                /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
+                case XAPIC_OFF_ID:
+                case XAPIC_OFF_VERSION:
+                case XAPIC_OFF_TPR:
+                case XAPIC_OFF_EOI:
+                case XAPIC_OFF_LDR:
+                case XAPIC_OFF_DFR:
+                case XAPIC_OFF_SVR:
+                case XAPIC_OFF_ISR0:    case XAPIC_OFF_ISR1:    case XAPIC_OFF_ISR2:    case XAPIC_OFF_ISR3:
+                case XAPIC_OFF_ISR4:    case XAPIC_OFF_ISR5:    case XAPIC_OFF_ISR6:    case XAPIC_OFF_ISR7:
+                case XAPIC_OFF_TMR0:    case XAPIC_OFF_TMR1:    case XAPIC_OFF_TMR2:    case XAPIC_OFF_TMR3:
+                case XAPIC_OFF_TMR4:    case XAPIC_OFF_TMR5:    case XAPIC_OFF_TMR6:    case XAPIC_OFF_TMR7:
+                case XAPIC_OFF_IRR0:    case XAPIC_OFF_IRR1:    case XAPIC_OFF_IRR2:    case XAPIC_OFF_IRR3:
+                case XAPIC_OFF_IRR4:    case XAPIC_OFF_IRR5:    case XAPIC_OFF_IRR6:    case XAPIC_OFF_IRR7:
+                case XAPIC_OFF_ESR:
+                case XAPIC_OFF_ICR_LO:
+                case XAPIC_OFF_ICR_HI:
+                case XAPIC_OFF_LVT_TIMER:
+                case XAPIC_OFF_LVT_THERMAL:
+                case XAPIC_OFF_LVT_PERF:
+                case XAPIC_OFF_LVT_LINT0:
+                case XAPIC_OFF_LVT_LINT1:
+                case XAPIC_OFF_LVT_ERROR:
+                case XAPIC_OFF_TIMER_ICR:
+                case XAPIC_OFF_TIMER_DCR:
+                    break;
+                default:
+                    return true;
+            }
+        }
+        else
+        {
+            /* Without APIC-register virtualization, only TPR accesses are virtualized. */
+            if (offAccess == XAPIC_OFF_TPR)
+            { /* likely */ }
+            else
+                return true;
+        }
+    }
+    else
+    {
+        /*
+         * Check write accesses to the APIC-access page that cause VM-exits.
+         */
+        if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
+        {
+            /*
+             * With APIC-register virtualization, a write access to any of the
+             * following registers are virtualized. Accessing any other register
+             * causes a VM-exit.
+             */
+            uint16_t const offAlignedAccess = offAccess & 0xfffc;
+            switch (offAlignedAccess)
+            {
+                case XAPIC_OFF_ID:
+                case XAPIC_OFF_TPR:
+                case XAPIC_OFF_EOI:
+                case XAPIC_OFF_LDR:
+                case XAPIC_OFF_DFR:
+                case XAPIC_OFF_SVR:
+                case XAPIC_OFF_ESR:
+                case XAPIC_OFF_ICR_LO:
+                case XAPIC_OFF_ICR_HI:
+                case XAPIC_OFF_LVT_TIMER:
+                case XAPIC_OFF_LVT_THERMAL:
+                case XAPIC_OFF_LVT_PERF:
+                case XAPIC_OFF_LVT_LINT0:
+                case XAPIC_OFF_LVT_LINT1:
+                case XAPIC_OFF_LVT_ERROR:
+                case XAPIC_OFF_TIMER_ICR:
+                case XAPIC_OFF_TIMER_DCR:
+                    break;
+                default:
+                    return true;
+            }
+        }
+        else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
+        {
+            /*
+             * With virtual-interrupt delivery, a write access to any of the
+             * following registers are virtualized. Accessing any other register
+             * causes a VM-exit.
+             *
+             * Note! The specification does not allow writing to offsets in-between
+             * these registers (e.g. TPR + 1 byte) unlike read accesses.
+             */
+            switch (offAccess)
+            {
+                case XAPIC_OFF_TPR:
+                case XAPIC_OFF_EOI:
+                case XAPIC_OFF_ICR_LO:
+                    break;
+                default:
+                    return true;
+            }
+        }
+        else
+        {
+            /*
+             * Without APIC-register virtualization or virtual-interrupt delivery,
+             * only TPR accesses are virtualized.
+             */
+            if (offAccess == XAPIC_OFF_TPR)
+            { /* likely */ }
+            else
+                return true;
+        }
+    }
+
+    /* The APIC-access is virtualized, does not cause a VM-exit. */
+    return false;
+}
+
+
+/**
+ * VMX VM-exit handler for APIC-write VM-exits.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu)
+{
+    iemVmxVmcsSetExitQual(pVCpu, pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite);
+    return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
+}
+
+
+/**
+ * VMX VM-exit handler for APIC-accesses.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offAccess   The offset of the register being accessed.
+ * @param   fAccess     The type of access (must be IEM_ACCESS_TYPE_READ or
+ *                      IEM_ACCESS_TYPE_WRITE).
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
+{
+    Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
+
+    VMXAPICACCESS enmAccess;
+    bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
+    if (fInEventDelivery)
+        enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
+    else if (fAccess == IEM_ACCESS_TYPE_READ)
+        enmAccess = VMXAPICACCESS_LINEAR_READ;
+    else
+        enmAccess = VMXAPICACCESS_LINEAR_WRITE;
+
+    uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
+                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE,   enmAccess);
+    iemVmxVmcsSetExitQual(pVCpu, uExitQual);
+    return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
+}
+
+
+/**
+ * Virtualizes an APIC read access.
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offAccess   The offset of the register being read.
+ * @param   cbAccess    The size of the APIC access.
+ * @param   pvData      Where to store the read data.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVirtApicRead(PVMCPU pVCpu, uint16_t offAccess, uint32_t cbAccess, void *pvData)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
+    Assert(pvData);
+
+    /* Check if we need to cause a VM-exit for this APIC access. */
+    bool const fIntercept = iemVmxVirtApicIsAccessIntercepted(pVCpu, offAccess, cbAccess, IEM_ACCESS_TYPE_READ);
+    if (fIntercept)
+        return iemVmxVmexitApicAccess(pVCpu, offAccess, IEM_ACCESS_TYPE_READ);
+
+    /*
+     * A read access from the APIC-access page that is virtualized (rather than
+     * causing a VM-exit) returns data from the virtual-APIC page.
+     *
+     * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
+     */
+    Assert(cbAccess <= 4);
+    Assert(offAccess < XAPIC_OFF_END + 4);
+    static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
+
+    uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
+    u32Data &= s_auAccessSizeMasks[cbAccess];
+    *(uint32_t *)pvData = u32Data;
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * Virtualizes an APIC write access.
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   offAccess   The offset of the register being written.
+ * @param   cbAccess    The size of the APIC access.
+ * @param   pvData      Pointer to the data being written.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVirtApicWrite(PVMCPU pVCpu, uint16_t offAccess, uint32_t cbAccess, void *pvData)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
+    Assert(pvData);
+
+    /* Check if we need to cause a VM-exit for this APIC access. */
+    bool const fIntercept = iemVmxVirtApicIsAccessIntercepted(pVCpu, offAccess, cbAccess, IEM_ACCESS_TYPE_WRITE);
+    if (fIntercept)
+        return iemVmxVmexitApicAccess(pVCpu, offAccess, IEM_ACCESS_TYPE_WRITE);
+
+    /*
+     * A write access to the APIC-access page that is virtualized (rather than
+     * causing a VM-exit) writes data to the virtual-APIC page.
+     */
+    uint32_t const u32Data = *(uint32_t *)pvData;
+    iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
+
+    /*
+     * Record the currently updated APIC offset, as we need this later for figuring
+     * out what to do as well as the exit qualification when causing an APIC-write VM-exit.
+     */
+    pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offAccess;
+
+    /*
+     * After completion of the current operation, we need to perform TPR virtualization,
+     * EOI virtualization or APIC-write VM-exit depending on which register was written.
+     *
+     * The current operation may be a REP-prefixed string instruction, execution of any
+     * other instruction, or delivery of an event through the IDT.
+     *
+     * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
+     * performed now but later after completion of the current operation.
+     *
+     * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
+     */
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
 * VMX VM-exit handler for TPR virtualization.
 *
…
    {
        /* Virtual-APIC page physical address. */
-        RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+        RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
        if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
            || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
…
    {
        /* APIC-access physical address. */
-        RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
+        RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
        if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
            || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
+
+        /*
+         * Disallow APIC-access page and virtual-APIC page from being the same address.
+         * Note! This is not an Intel requirement, but one imposed by our implementation.
+         */
+        /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
+         *        redirecting accesses between the APIC-access page and the virtual-APIC
+         *        page. If any nested hypervisor requires this, we can implement it later. */
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+        {
+            RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+            if (GCPhysVirtApic == GCPhysApicAccess)
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
+        }
    }

…
    {
        /* VMREAD-bitmap physical address. */
-        RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
+        RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
        if (   ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
            || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
…

        /* VMWRITE-bitmap physical address. */
-        RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
+        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
        if (   ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
            || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
…
        {
            Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_MTF);
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
            return VINF_SUCCESS;
        }
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h (r75107 → r75301)

    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uPrevPauseTick);
    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uVmentryTick);
+    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.offVirtApicWrite);
    GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt);
    GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
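
The comment block in iemVmxVirtApicWrite (IEMAllCImplVmxInstr.cpp.h above) defers the actual TPR virtualization, EOI virtualization or APIC-write VM-exit to the end of the current instruction or event delivery; that boundary handling is what the commit message marks as still to do. The sketch below is purely illustrative of the intended dispatch and is not code from this changeset: the helper name is hypothetical and the exact dispatch rules are an assumption based only on the functions visible in this diff.

    /* Hypothetical sketch: servicing VMCPU_FF_VMX_UPDATE_VAPIC at the instruction/event
       boundary.  Assumes it lives in the same translation unit as the IEM VMX code so the
       iemVmx* helpers and the guest context are in scope. */
    static VBOXSTRICTRC iemVmxServicePendingVirtApicUpdate(PVMCPU pVCpu, uint8_t cbInstr)
    {
        uint16_t const offWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);

        if (offWrite == XAPIC_OFF_TPR)                              /* VTPR written: TPR virtualization. */
            return iemVmxVmexitTprVirtualization(pVCpu, cbInstr);
        /* VEOI and virtual self-IPI handling would go here when virtual-interrupt
           delivery is active (assumption). */
        return iemVmxVmexitApicWrite(pVCpu);                        /* Anything else: APIC-write VM-exit. */
    }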