VirtualBox

Changeset 74621 in vbox for trunk/src


Timestamp:
Oct 5, 2018 6:09:04 AM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 VM-exit; CR3 intercepts.
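
For context: the CR3 intercepts implemented below follow the conditional VM-exit rules the code itself cites from the Intel spec ("Instructions That Cause VM Exits Conditionally"). MOV from CR3 causes a VM-exit whenever the CR3-store exiting control is set, while MOV to CR3 causes a VM-exit only when the CR3-load exiting control is set and the value being loaded matches none of the CR3-target values in the VMCS. The sketch below illustrates just that decision with hypothetical, simplified types and names; it is not the IEM code from this changeset.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified VMCS view: only the bits needed for CR3 intercepts. */
    typedef struct DEMOVMCS
    {
        bool     fCr3LoadExit;      /* "CR3-load exiting" processor-based control. */
        bool     fCr3StoreExit;     /* "CR3-store exiting" processor-based control. */
        uint32_t cCr3Targets;       /* CR3-target count (0..4). */
        uint64_t au64Cr3Targets[4]; /* CR3-target values 0..3. */
    } DEMOVMCS;

    /* MOV GReg,CR3: intercepted purely on the CR3-store exiting control. */
    static bool demoIsMovFromCr3Intercepted(const DEMOVMCS *pVmcs)
    {
        return pVmcs->fCr3StoreExit;
    }

    /* MOV CR3,GReg: intercepted only if the new value matches none of the CR3-target values. */
    static bool demoIsMovToCr3Intercepted(const DEMOVMCS *pVmcs, uint64_t uNewCr3)
    {
        if (!pVmcs->fCr3LoadExit)
            return false;
        for (uint32_t i = 0; i < pVmcs->cCr3Targets; i++)
            if (pVmcs->au64Cr3Targets[i] == uNewCr3)
                return false;   /* A matching CR3-target value suppresses the VM-exit. */
        return true;
    }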

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

  ' '  Unmodified
  '+'  Added
  '-'  Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r74620 → r74621

@@ -5210,10 +5210,24 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
-    /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
     {
-        if (   iCrReg == 0
-            || iCrReg == 4)
-            crX = iemVmxMaskCr0CR4(pVCpu, iCrReg, crX);
+        switch (iCrReg)
+        {
+            case 0:
+            case 4:
+            {
+                /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
+                crX = iemVmxMaskCr0CR4(pVCpu, iCrReg, crX);
+                break;
+            }
+
+            case 3:
+            {
+                VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
+                if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+                    return rcStrict;
+                break;
+            }
+        }
     }
 #endif
     
@@ -5749,11 +5763,20 @@
     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
     {
-        if (   iCrReg == 0
-            || iCrReg == 4)
-        {
-            VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr);
-            if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+        VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
+        switch (iCrReg)
+        {
+            case 0:
+            case 4:
+                rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr);
+                break;
+            case 3:
+                rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr);
+                break;
+            default:
+                break;
+        }
+
+        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
                 return rcStrict;
-        }
     }
 #endif
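
The hunks above change the MOV-from-CR and MOV-to-CR emulation to dispatch on the control-register index, so that CR3 accesses can be intercepted in VMX non-root mode alongside the existing CR0/CR4 handling. The CR0/CR4 read path relies on the guest/host mask and read shadow: bits set in the mask are owned by the host and are read back from the read shadow, while all other bits come from the actual control register. Below is a minimal sketch of that masking rule with hypothetical names; it is an illustration of the rule, not the iemVmxMaskCr0CR4 implementation.

    #include <stdint.h>

    /*
     * Hypothetical helper: value a nested guest observes when it reads CR0 or
     * CR4 in VMX non-root mode.  Bits owned by the host (set in the guest/host
     * mask) are taken from the read shadow; the remaining bits come from the
     * actual control register value.
     */
    static uint64_t demoMaskedCrRead(uint64_t uRealCrX,       /* Current CR0/CR4 value. */
                                     uint64_t fGstHostMask,   /* CR0/CR4 guest/host mask from the VMCS. */
                                     uint64_t uReadShadow)    /* CR0/CR4 read shadow from the VMCS. */
    {
        return (uReadShadow & fGstHostMask) | (uRealCrX & ~fGstHostMask);
    }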
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74620 → r74621

@@ -901,6 +901,32 @@
 
 /**
+ * Gets a CR3 target value from the VMCS.
+ *
+ * @returns The CR3-target value.
+ * @param   pVmcs           Pointer to the virtual VMCS.
+ * @param   idxCr3Target    The index of the CR3-target value to
+ *                          retrieve.
+ */
+DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
+{
+    Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
+
+    uint8_t  const  uWidth         = VMX_VMCS_ENC_WIDTH_NATURAL;
+    uint8_t  const  uType          = VMX_VMCS_ENC_TYPE_CONTROL;
+    uint8_t  const  uWidthType     = (uWidth << 2) | uType;
+    uint8_t  const  uIndex         = (idxCr3Target << 1) + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
+    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
+    uint16_t const  offField       = g_aoffVmcsMap[uWidthType][uIndex];
+    uint8_t  const *pbVmcs         = (uint8_t *)pVmcs;
+    uint8_t  const *pbField        = pbVmcs + offField;
+    uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
+
+    return uCr3TargetValue;
+}
+
+
+/**
  * Masks the nested-guest CR0/CR4 mask subjected to the corresponding guest/host
- * mask and the read-shadow.
+ * mask and the read-shadow (CR0/CR4 read).
  *
  * @returns The masked CR0/CR4.
     
@@ -2919,5 +2945,5 @@
      * If CR0.TS is owned by the host:
      *   - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
-     *   - If CR0.TS is cleared in the read-shadow, no VM-exit is triggered, however
+     *   - If CR0.TS is cleared in the read-shadow, no VM-exit is caused, however
      *     the CLTS instruction is not allowed to modify CR0.TS.
      *
     
@@ -2952,5 +2978,5 @@
 
 /**
- * VMX VM-exit handler for VM-exits due to 'Mov CR0, GReg' and 'Mov CR4, GReg'
+ * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
  * (CR0/CR4 write).
  *
     
@@ -2960,6 +2986,7 @@
  * @param   uGuestCrX       The current guest CR0/CR4.
  * @param   puNewCrX        Pointer to the new CR0/CR4 value. Will be updated
- *                          if no VM-exit is triggered.
- * @param   iGReg           The general register to load the CR0/CR4 value from.
+ *                          if no VM-exit is caused.
+ * @param   iGReg           The general register from which the CR0/CR4 value is
+ *                          being loaded.
  * @param   cbInstr         The instruction length in bytes.
  */
     
@@ -3020,4 +3047,91 @@
      */
     *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   iGReg       The general register to which the CR3 value is being stored.
+ * @param   cbInstr     The instruction length in bytes.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
+
+    /*
+     * If the CR3-store exiting control is set, we must cause a VM-exit.
+     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
+     */
+    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
+    {
+        Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
+
+        VMXVEXITINFO ExitInfo;
+        RT_ZERO(ExitInfo);
+        ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+        ExitInfo.cbInstr = cbInstr;
+
+        ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,   VMX_EXIT_QUAL_CRX_ACCESS_READ)
+                         | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG,   iGReg);
+        return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+    }
+
+    return VINF_VMX_INTERCEPT_NOT_ACTIVE;
+}
+
+
+/**
+ * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
+ *
+ * @returns VBox strict status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uNewCr3     The new CR3 value.
+ * @param   iGReg       The general register from which the CR3 value is being
+ *                      loaded.
+ * @param   cbInstr     The instruction length in bytes.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    /*
+     * If the CR3-load exiting control is set and the new CR3 value does not
+     * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
+     *
+     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
+     */
+    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
+    {
+        uint32_t uCr3TargetCount = pVmcs->u32Cr3TargetCount;
+        Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
+
+        for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
+        {
+            uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
+            if (uNewCr3 != uCr3TargetValue)
+            {
+                Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
+
+                VMXVEXITINFO ExitInfo;
+                RT_ZERO(ExitInfo);
+                ExitInfo.uReason = VMX_EXIT_MOV_CRX;
+                ExitInfo.cbInstr = cbInstr;
+
+                ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
+                                 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS,   VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
+                                 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG,   iGReg);
+                return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
+            }
+        }
+    }
 
     return VINF_VMX_INTERCEPT_NOT_ACTIVE;
     
@@ -5179,5 +5293,7 @@
 
     /* Consult the MSR bitmap if the feature is supported. */
-    if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_MSR_BITMAPS))
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     {
         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
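
When a MOV to CR0/CR4 in VMX non-root mode is not intercepted, the handler above merges the written value so that host-owned bits (those set in the CR0/CR4 guest/host mask) keep their current guest value and only guest-owned bits take the new value, as in the *puNewCrX assignment shown in the diff. A small sketch of that merge with hypothetical names:

    #include <stdint.h>

    /*
     * Hypothetical helper: CR0/CR4 value that actually takes effect when a
     * nested guest writes CR0/CR4 and the write is not intercepted.  Host-owned
     * bits (set in the guest/host mask) are preserved from the current guest
     * value; only guest-owned bits are updated from the value being written.
     */
    static uint64_t demoApplyCrWrite(uint64_t uGuestCrX,      /* Current guest CR0/CR4 value. */
                                     uint64_t uNewCrX,        /* Value the guest is writing. */
                                     uint64_t fGstHostMask)   /* CR0/CR4 guest/host mask from the VMCS. */
    {
        return (uGuestCrX & fGstHostMask) | (uNewCrX & ~fGstHostMask);
    }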