VirtualBox

Changeset 79343 in vbox for trunk/src/VBox


Timestamp: Jun 26, 2019 9:05:12 AM (6 years ago)
Author: vboxsync
Message: VMM/IEM: Nested VMX: bugref:9180 Add functions to read/write VMCS fields without additional checks for upcoming VMCS shadowing changes.
Location: trunk/src/VBox/VMM/VMMAll
Files: 2 edited

Legend: lines starting with "+" were added, lines starting with "-" were removed; all other lines are unmodified context.
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

(changed from r79031 to r79343)

@@ -15785 +15785 @@
 
 /**
+ * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
+ *
+ * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
+ * are performed. Bounds checks are strict builds only.
+ *
+ * @param   pVmcs           Pointer to the virtual VMCS.
+ * @param   u64VmcsField    The VMCS field.
+ * @param   pu64Dst         Where to store the VMCS value.
+ *
+ * @remarks May be called with interrupts disabled.
+ * @todo    This should probably be moved to CPUM someday.
+ */
+VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
+{
+    AssertPtr(pVmcs);
+    AssertPtr(pu64Dst);
+    iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
+}
+
+
+/**
+ * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
+ *
+ * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
+ * are performed. Bounds checks are strict builds only.
+ *
+ * @param   pVmcs           Pointer to the virtual VMCS.
+ * @param   u64VmcsField    The VMCS field.
+ * @param   u64Val          The value to write.
+ *
+ * @remarks May be called with interrupts disabled.
+ * @todo    This should probably be moved to CPUM someday.
+ */
+VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
+{
+    AssertPtr(pVmcs);
+    iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
+}
+
+
+/**
  * Interface for HM and EM to virtualize x2APIC MSR accesses.
  *
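For orientation, here is a minimal caller-side sketch (not part of the changeset) of how HM or EM code might use the two interfaces added above. Only the IEMReadVmxVmcsField/IEMWriteVmxVmcsField signatures and the pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs) access pattern come from this diff; the helper name, the cbInstr parameter and the VMX_VMCS_GUEST_RIP field constant are assumed for illustration, and the usual VMM headers are presumed.

    /* Hypothetical HM/EM-side helper (illustration only): advance the RIP stored in
       the nested-guest ("virtual") VMCS.  The new interfaces perform no VMREAD/VMWRITE
       instruction checks, so the caller must already know the field is valid. */
    static void hmVmxSketchAdvanceNstGstRip(PVMCPU pVCpu, uint8_t cbInstr)
    {
        PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
        Assert(pVmcs);

        uint64_t u64GuestRip = 0;
        IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);   /* field constant assumed */
        IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, u64GuestRip + cbInstr);
    }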
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

(changed from r79235 to r79343)

@@ -2609 +2609 @@
         Log3(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
 
+    /* Notify HM that the current VMCS fields have been modified. */
+    HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
+
     /* Notify HM that we've completed the VM-exit. */
-    HMNotifyVmxNstGstVmexit(pVCpu, &pVCpu->cpum.GstCtx);
+    HMNotifyVmxNstGstVmexit(pVCpu);
 
     /* We're no longer in nested-guest execution mode. */
     
@@ -4573 +4576 @@
         if ((uRvi >> 4) > (uPpr >> 4))
         {
-            Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signaling pending interrupt\n", uRvi, uPpr));
+            Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signalling pending interrupt\n", uRvi, uPpr));
             VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
         }
     
@@ -5709 +5712 @@
         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
-                                         GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
+                                         GCPhysShadowVmcs, VMX_V_SHADOW_VMCS_SIZE);
         if (RT_SUCCESS(rc))
         { /* likely */ }
     
@@ -7531 +7534 @@
 
 /**
- * VMREAD common (memory/register) instruction execution worker
+ * VMREAD instruction execution worker that does not perform any validation checks.
+ *
+ * Callers are expected to have performed the necessary checks and to ensure the
+ * VMREAD will succeed.
+ *
+ * @param   pVmcs           Pointer to the virtual VMCS.
+ * @param   pu64Dst         Where to write the VMCS value.
+ * @param   u64VmcsField    The VMCS field.
+ *
+ * @remarks May be called with interrupts disabled.
+ */
+IEM_STATIC void iemVmxVmreadNoCheck(PCVMXVVMCS pVmcs, uint64_t *pu64Dst, uint64_t u64VmcsField)
+{
+    VMXVMCSFIELD VmcsField;
+    VmcsField.u = u64VmcsField;
+    uint8_t  const uWidth     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
+    uint8_t  const uType      = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
+    uint8_t  const uWidthType = (uWidth << 2) | uType;
+    uint8_t  const uIndex     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
+    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
+    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
+    Assert(offField < VMX_V_VMCS_SIZE);
+    AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
+
+    /*
+     * Read the VMCS component based on the field's effective width.
+     *
+     * The effective width is 64-bit fields adjusted to 32-bits if the access-type
+     * indicates high bits (little endian).
+     *
+     * Note! The caller is responsible to trim the result and update registers
+     * or memory locations are required. Here we just zero-extend to the largest
+     * type (i.e. 64-bits).
+     */
+    uint8_t const *pbVmcs    = (uint8_t const *)pVmcs;
+    uint8_t const *pbField   = pbVmcs + offField;
+    uint8_t const  uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
+    switch (uEffWidth)
+    {
+        case VMX_VMCSFIELD_WIDTH_64BIT:
+        case VMX_VMCSFIELD_WIDTH_NATURAL: *pu64Dst = *(uint64_t const *)pbField; break;
+        case VMX_VMCSFIELD_WIDTH_32BIT:   *pu64Dst = *(uint32_t const *)pbField; break;
+        case VMX_VMCSFIELD_WIDTH_16BIT:   *pu64Dst = *(uint16_t const *)pbField; break;
+    }
+}
+
+
+/**
+ * VMREAD common (memory/register) instruction execution worker.
  *
  * @returns Strict VBox status code.
     
@@ -7603 +7654 @@
 
     /*
-     * Setup reading from the current or shadow VMCS.
-     */
-    uint8_t *pbVmcs;
-    if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
-        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-    else
-        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
-    Assert(pbVmcs);
-
-    VMXVMCSFIELD VmcsField;
-    VmcsField.u = u64VmcsField;
-    uint8_t  const uWidth     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
-    uint8_t  const uType      = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
-    uint8_t  const uWidthType = (uWidth << 2) | uType;
-    uint8_t  const uIndex     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
-    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
-    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
-    Assert(offField < VMX_V_VMCS_SIZE);
-
-    /*
-     * Read the VMCS component based on the field's effective width.
-     *
-     * The effective width is 64-bit fields adjusted to 32-bits if the access-type
-     * indicates high bits (little endian).
-     *
-     * Note! The caller is responsible to trim the result and update registers
-     * or memory locations are required. Here we just zero-extend to the largest
-     * type (i.e. 64-bits).
-     */
-    uint8_t      *pbField   = pbVmcs + offField;
-    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
-    switch (uEffWidth)
-    {
-        case VMX_VMCSFIELD_WIDTH_64BIT:
-        case VMX_VMCSFIELD_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
-        case VMX_VMCSFIELD_WIDTH_32BIT:   *pu64Dst = *(uint32_t *)pbField; break;
-        case VMX_VMCSFIELD_WIDTH_16BIT:   *pu64Dst = *(uint16_t *)pbField; break;
-    }
+     * Reading from the current or shadow VMCS.
+     */
+    PCVMXVVMCS pVmcs = !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+                     ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)
+                     : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
+    Assert(pVmcs);
+    iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
     return VINF_SUCCESS;
 }
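As an aside, the width/type/index decode that iemVmxVmreadNoCheck performs via RT_BF_GET and the VMX_BF_VMCSFIELD_* bitfields follows the architectural VMCS field encoding (bit 0 access type, bits 9:1 index, bits 11:10 type, bits 14:13 width). Below is a small self-contained sketch of that decode with plain shifts and masks, independent of the VirtualBox macros and offset table; the two example field values are standard Intel encodings, not taken from this changeset.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone sketch (not VirtualBox code): decode a VMCS field encoding the same
       way iemVmxVmreadNoCheck/iemVmxVmwriteNoCheck do, spelled out as shifts/masks. */
    static void DecodeVmcsField(uint32_t uField)
    {
        unsigned const fHigh      = (uField >> 0)  & 0x1;   /* access type: 1 = high half of a 64-bit field */
        unsigned const uIndex     = (uField >> 1)  & 0x1ff; /* field index within its group */
        unsigned const uType      = (uField >> 10) & 0x3;   /* 0=control 1=read-only 2=guest-state 3=host-state */
        unsigned const uWidth     = (uField >> 13) & 0x3;   /* 0=16-bit 1=64-bit 2=32-bit 3=natural-width */
        unsigned const uWidthType = (uWidth << 2) | uType;  /* combined key, as used to index g_aoffVmcsMap */
        printf("field %#06" PRIx32 ": width=%u type=%u index=%u high=%u widthType=%u\n",
               uField, uWidth, uType, uIndex, fHigh, uWidthType);
    }

    int main(void)
    {
        DecodeVmcsField(UINT32_C(0x0000681e)); /* guest RIP: natural-width guest-state field, index 15 */
        DecodeVmcsField(UINT32_C(0x00004002)); /* primary processor-based controls: 32-bit control field, index 1 */
        return 0;
    }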
     
@@ -7740 +7760 @@
     Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     return rcStrict;
+}
+
+
+/**
+ * VMWRITE instruction execution worker that does not perform any validation
+ * checks.
+ *
+ * Callers are expected to have performed the necessary checks and to ensure the
+ * VMWRITE will succeed.
+ *
+ * @param   pVmcs           Pointer to the virtual VMCS.
+ * @param   u64Val          The value to write.
+ * @param   u64VmcsField    The VMCS field.
+ *
+ * @remarks May be called with interrupts disabled.
+ */
+IEM_STATIC void iemVmxVmwriteNoCheck(PVMXVVMCS pVmcs, uint64_t u64Val, uint64_t u64VmcsField)
+{
+    VMXVMCSFIELD VmcsField;
+    VmcsField.u = u64VmcsField;
+    uint8_t  const uWidth     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
+    uint8_t  const uType      = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
+    uint8_t  const uWidthType = (uWidth << 2) | uType;
+    uint8_t  const uIndex     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
+    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
+    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
+    Assert(offField < VMX_V_VMCS_SIZE);
+    AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
+
+    /*
+     * Write the VMCS component based on the field's effective width.
+     *
+     * The effective width is 64-bit fields adjusted to 32-bits if the access-type
+     * indicates high bits (little endian).
+     */
+    uint8_t      *pbVmcs    = (uint8_t *)pVmcs;
+    uint8_t      *pbField   = pbVmcs + offField;
+    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
+    switch (uEffWidth)
+    {
+        case VMX_VMCSFIELD_WIDTH_64BIT:
+        case VMX_VMCSFIELD_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
+        case VMX_VMCSFIELD_WIDTH_32BIT:   *(uint32_t *)pbField = u64Val; break;
+        case VMX_VMCSFIELD_WIDTH_16BIT:   *(uint16_t *)pbField = u64Val; break;
+    }
 }
 
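A side note on the write path: the *(uint32_t *)pbField = u64Val and *(uint16_t *)pbField = u64Val cases above silently keep only the low bits of the 64-bit input, which is the intended effective-width behaviour. The following tiny standalone demonstration of that truncation is an illustration only, not VirtualBox code, and uses a hypothetical field offset.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Stand-in for the virtual VMCS backing store; the union keeps it 64-bit aligned. */
        union { uint8_t ab[64]; uint64_t au64[8]; } Vmcs = { { 0 } };

        uint64_t const u64Val   = UINT64_C(0x1122334455667788);
        unsigned const offField = 8;                     /* hypothetical field offset */
        uint8_t       *pbField  = &Vmcs.ab[offField];

        *(uint32_t *)pbField        = u64Val;            /* VMX_VMCSFIELD_WIDTH_32BIT case: low dword kept */
        *(uint16_t *)(pbField + 16) = u64Val;            /* VMX_VMCSFIELD_WIDTH_16BIT case: low word kept */

        printf("32-bit field holds %#" PRIx32 "\n", *(uint32_t *)pbField);          /* 0x55667788 */
        printf("16-bit field holds %#x\n", (unsigned)*(uint16_t *)(pbField + 16));  /* 0x7788 */
        return 0;
    }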
     
@@ -7858 +7923 @@
 
     /*
-     * Setup writing to the current or shadow VMCS.
-     */
-    uint8_t *pbVmcs;
-    if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
-        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
-    else
-        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
-    Assert(pbVmcs);
-
-    VMXVMCSFIELD VmcsField;
-    VmcsField.u = u64VmcsField;
-    uint8_t  const uWidth     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
-    uint8_t  const uType      = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
-    uint8_t  const uWidthType = (uWidth << 2) | uType;
-    uint8_t  const uIndex     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
-    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
-    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
-    Assert(offField < VMX_V_VMCS_SIZE);
-
-    /*
-     * Write the VMCS component based on the field's effective width.
-     *
-     * The effective width is 64-bit fields adjusted to 32-bits if the access-type
-     * indicates high bits (little endian).
-     */
-    uint8_t      *pbField   = pbVmcs + offField;
-    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
-    switch (uEffWidth)
-    {
-        case VMX_VMCSFIELD_WIDTH_64BIT:
-        case VMX_VMCSFIELD_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
-        case VMX_VMCSFIELD_WIDTH_32BIT:   *(uint32_t *)pbField = u64Val; break;
-        case VMX_VMCSFIELD_WIDTH_16BIT:   *(uint16_t *)pbField = u64Val; break;
-    }
+     * Write to the current or shadow VMCS.
+     */
+    bool const fInVmxNonRootMode = IEM_VMX_IS_NON_ROOT_MODE(pVCpu);
+    PVMXVVMCS pVmcs = !fInVmxNonRootMode
+                    ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)
+                    : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
+    Assert(pVmcs);
+    iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
+
+    /* Notify HM that the VMCS content might have changed. */
+    if (!fInVmxNonRootMode)
+        HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
 
     iemVmxVmSucceed(pVCpu);
     
@@ -8231 +8274 @@
         rc = iemVmxReadCurrentVmcsFromGstMem(pVCpu);
         if (RT_SUCCESS(rc))
-        { /* likely */ }
+        {
+            /* Notify HM that a new, current VMCS is loaded. */
+            HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
+        }
         else
         {