
Changeset 73617 in vbox for trunk/src/VBox/VMM/VMMR3/HM.cpp


Timestamp:
Aug 10, 2018 2:09:55 PM
Author:
vboxsync
Message:

VMM/HMVMXR0: Use IEMExecOne() rather than manually interpreting a select few instructions in the
real-on-v86 mode when unrestricted-guest execution is not allowed.
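
Illustratively, the replacement approach boils down to handing the current guest instruction to IEM instead of decoding it by hand. A minimal sketch of the idea (not part of this changeset's diff; error propagation omitted):

/* Hypothetical sketch (not the actual r73617 change): in the real-on-v86
   exit paths, let IEM fetch, decode and execute the current instruction
   rather than interpreting a select few instructions manually. */
VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
/* ...the caller then propagates rcStrict as usual. */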

File:
1 edited

  • trunk/src/VBox/VMM/VMMR3/HM.cpp

r73606 → r73617 (the following block is removed from HM.cpp by this changeset):
/**
 * Checks if a code selector (CS) is suitable for execution
 * within VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmR3IsCodeSelectorOkForVmx(PCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment; it must be present and usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(  pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ? pSel->Attr.n.u2Dpl <= uStackDpl
                    : pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *  - G bit must be set if any high limit bits are set.
     *  - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}

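The two VT-x granularity conditions are easiest to see in isolation: with G=0 the 20-bit descriptor limit counts bytes, so no high limit bits can be set; with G=1 it counts 4 KiB pages, so the low 12 bits of the expanded limit are always all ones. A minimal standalone restatement using plain types (hypothetical helper name, not VBox code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical standalone restatement of the VT-x granularity rule. */
static bool isLimitConsistentWithG(uint32_t u32Limit, bool fGranularity)
{
    if ((u32Limit & 0xfff00000) != 0 && !fGranularity)
        return false;               /* high limit bits set require G=1 */
    if ((u32Limit & 0x00000fff) != 0xfff && fGranularity)
        return false;               /* cleared low limit bits require G=0 */
    return true;
}

/* Examples: a flat 4 GiB segment (0xffffffff, G=1) and a 64 KiB real-mode
   style segment (0x0000ffff, G=0) both pass; 0x00100000 with G=0 fails
   because a byte-granular limit field is only 20 bits wide. */
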
/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for
 * execution within VMX when unrestricted execution isn't
 * available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check
 *                      (DS/ES/FS/GS).
 */
static bool hmR3IsDataSelectorOkForVmx(PCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl  >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}

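The privilege test above folds two cases into one condition: ordinary segments need DPL >= RPL, while conforming code segments are exempt. The function approximates the exemption with a single type comparison (every type >= X86_SEL_TYPE_ER_ACC is let through). A hypothetical standalone restatement of the strict form, using plain types:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch: conforming code segments have both the code bit (8)
   and the conforming bit (4) set in the 4-bit type field, so the strict
   form of the exemption tests those two bits. */
static bool dataSelPrivilegeOk(uint8_t u2Dpl, uint16_t uSel, uint8_t u4Type)
{
    bool fConformingCode = (u4Type & 0xc) == 0xc;  /* code + conforming */
    return fConformingCode || u2Dpl >= (uSel & 3); /* RPL = low two bits */
}
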
/**
 * Checks if the stack selector (SS) is suitable for execution
 * within VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check (SS).
 */
static bool hmR3IsStackSelectorOkForVmx(PCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed, writable data segment and it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    ==                 (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}

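As a concrete worked example (values assumed for illustration, using the VBox types from this file): a flat ring-0 stack segment satisfies every SS check above.

/* Hypothetical worked example, not code that appears in HM.cpp. */
CPUMSELREG Ss;
RT_ZERO(Ss);
Ss.Sel      = 0x0010;                    /* RPL = 0, matching DPL below.       */
Ss.u64Base  = 0;
Ss.u32Limit = 0xffffffff;                /* All low limit bits set, so G=1.    */
Ss.Attr.u   = 0x0093 | X86DESCATTR_G;    /* Accessed RW data, S=1, DPL=0, P=1. */
Assert(hmR3IsStackSelectorOkForVmx(&Ss));
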
/**
 * Checks if we can currently use hardware accelerated mode.
 *
 * @returns true if we can currently use hardware acceleration, otherwise false.
 * @param   pVM         The cross context VM structure.
 * @param   pCtx        Pointer to the guest CPU context.
 */
VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    Assert(HMIsEnabled(pVM));

#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || CPUMIsGuestVmxEnabled(pCtx))
    {
        Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false\n"));
        return false;
    }
#endif

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hm.s.svm.fEnabled)
    {
        pVCpu->hm.s.fActive = true;
        return true;
    }

    pVCpu->hm.s.fActive = false;

    /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
    Assert(   (pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));

    bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without
         * paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmR3IsCodeSelectorOkForVmx(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->ds)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->es)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->fs)
                        || !hmR3IsDataSelectorOkForVmx(&pCtx->gs)
                        || !hmR3IsStackSelectorOkForVmx(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    ||  CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possibly the same as above, but the new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uint32_t uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uint32_t uCr4Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr4Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr4Mask) != uCr4Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr4Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr4Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}

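The four CR0/CR4 tests above are all instances of one rule from the VMX fixed-bit MSRs: a bit set in FIXED0 must be 1 in the guest value, and a bit clear in FIXED1 must be 0. A minimal standalone restatement (plain types, hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical standalone restatement of the VMX CR0/CR4 fixed-bit rule. */
static bool isCrValueVmxCompatible(uint64_t uGuestCr, uint64_t uFixed0, uint64_t uFixed1)
{
    if ((uGuestCr & uFixed0) != uFixed0)    /* required-one bits missing?  */
        return false;
    if ((uGuestCr & ~uFixed1) != 0)         /* required-zero bits set?     */
        return false;
    return true;
}

/* The function above additionally masks out bits it emulates around before
   applying the FIXED0 test: NE always, plus PG (and PE when real-mode
   support is available). */
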
/**
 * Checks if we need to reschedule due to VMM device heap changes.
 *