Changeset 76850 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Jan 17, 2019 11:23:47 AM (6 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 128209
Message:
VMM/IEM: Nested VMX: bugref:9180 Rescheduling fixes. Don't clear the host LDTR base and limit while restoring host state on VM-exit; the spec only mentions clearing the selector and marking it unusable.
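As a quick illustration of the LDTR part of the change, here is a before/after paraphrase of the hunk further down (same CPUMCTX field names as the diff; this is a sketch for readability, not the verbatim upstream code):

    /* r76837 (before): host-state restoration on VM-exit also zeroed the LDTR base and limit. */
    pVCpu->cpum.GstCtx.ldtr.Sel               = 0;
    pVCpu->cpum.GstCtx.ldtr.ValidSel          = 0;
    pVCpu->cpum.GstCtx.ldtr.fFlags            = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.ldtr.u32Limit          = 0;   /* not called for by the spec */
    pVCpu->cpum.GstCtx.ldtr.u64Base           = 0;   /* not called for by the spec */
    pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;

    /* r76850 (after): only clear the selector and mark LDTR unusable; leave base and limit alone. */
    pVCpu->cpum.GstCtx.ldtr.Sel      = 0;
    pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
    pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.ldtr.Attr.u   = X86DESCATTR_UNUSABLE;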

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r76837)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r76850)

@@ -165 +165 @@
 /** Enables/disables IEM-only EM execution policy in and from ring-3.   */
 # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) \
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) \
     do { \
         Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
@@ -171 +171 @@
     } while (0)

-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) \
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) \
     do { \
         Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
-        EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
+        return EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
     } while (0)
 # else
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix)     do { return VINF_SUCCESS; } while (0)
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix)        do { } while (0)
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet)   do { return (a_rcRet); } while (0)
+#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet)  do { return (a_rcRet); } while (0)
 # endif

     
@@ -1561 +1561 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  */
-IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
+IEM_STATIC void iemVmxVmentrySaveNmiBlockingFF(PVMCPU pVCpu)
 {
     /* We shouldn't be called multiple times during VM-entry. */
     
@@ -1602 +1602 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  */
-IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
+IEM_STATIC void iemVmxVmexitRestoreNmiBlockingFF(PVMCPU pVCpu)
 {
     if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
     
@@ -2151 +2151 @@
         RTSEL const HostSel   = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
         bool const  fUnusable = RT_BOOL(HostSel == 0);
+        PCPUMSELREG pSelReg   = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];

         /* Selector. */
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel      = HostSel;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags   = CPUMSELREG_FLAGS_VALID;
+        pSelReg->Sel      = HostSel;
+        pSelReg->ValidSel = HostSel;
+        pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;

         /* Limit. */
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
+        pSelReg->u32Limit = 0xffffffff;

         /* Base. */
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
+        pSelReg->u64Base = 0;

         /* Attributes. */
         if (iSegReg == X86_SREG_CS)
         {
-            pVCpu->cpum.GstCtx.cs.Attr.n.u4Type        = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
-            pVCpu->cpum.GstCtx.ss.Attr.n.u1DescType    = 1;
-            pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl         = 0;
-            pVCpu->cpum.GstCtx.cs.Attr.n.u1Present     = 1;
-            pVCpu->cpum.GstCtx.cs.Attr.n.u1Long        = fHostInLongMode;
-            pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig      = !fHostInLongMode;
-            pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
-            Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
+            pSelReg->Attr.n.u4Type        = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
+            pSelReg->Attr.n.u1DescType    = 1;
+            pSelReg->Attr.n.u2Dpl         = 0;
+            pSelReg->Attr.n.u1Present     = 1;
+            pSelReg->Attr.n.u1Long        = fHostInLongMode;
+            pSelReg->Attr.n.u1DefBig      = !fHostInLongMode;
+            pSelReg->Attr.n.u1Granularity = 1;
+            Assert(!pSelReg->Attr.n.u1Unusable);
             Assert(!fUnusable);
         }
         else
         {
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType    = 1;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl         = 0;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present     = 1;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig      = 1;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
-            pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable    = fUnusable;
+            pSelReg->Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
+            pSelReg->Attr.n.u1DescType    = 1;
+            pSelReg->Attr.n.u2Dpl         = 0;
+            pSelReg->Attr.n.u1Present     = 1;
+            pSelReg->Attr.n.u1DefBig      = 1;
+            pSelReg->Attr.n.u1Granularity = 1;
+            pSelReg->Attr.n.u1Unusable    = fUnusable;
         }
     }
     
@@ -2219 +2220 @@
     pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;

-    /* LDTR. */
+    /* LDTR (Warning! do not touch the base and limits here). */
     pVCpu->cpum.GstCtx.ldtr.Sel               = 0;
     pVCpu->cpum.GstCtx.ldtr.ValidSel          = 0;
     pVCpu->cpum.GstCtx.ldtr.fFlags            = CPUMSELREG_FLAGS_VALID;
-    pVCpu->cpum.GstCtx.ldtr.u32Limit          = 0;
-    pVCpu->cpum.GstCtx.ldtr.u64Base           = 0;
-    pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
+    pVCpu->cpum.GstCtx.ldtr.Attr.u            = X86DESCATTR_UNUSABLE;

     /* GDTR. */
     
@@ -2816 +2815 @@
     Assert(pVmcs);

+    /* Update the VM-exit reason, the other relevant data fields are expected to be updated by the caller already. */
     pVmcs->u32RoExitReason = uExitReason;
     Log3(("vmexit: uExitReason=%#RX32 uExitQual=%#RX64\n", uExitReason, pVmcs->u64RoExitQual));
     
@@ -2830 +2830 @@
          * occurs in enclave mode/SMM which we don't support yet.
          *
-         * If we ever add support for it, we can pass just the lower bits, till then an assert
-         * should suffice.
+         * If we ever add support for it, we can pass just the lower bits to the functions
+         * below, till then an assert should suffice.
          */
         Assert(!RT_HI_U16(uExitReason));

+        /* Save the guest state into the VMCS and restore guest MSRs from the auto-store guest MSR area. */
         iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
         int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
@@ -2840 +2841 @@
         { /* likely */ }
         else
-        {
-            IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VMX-Abort");
             return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
-        }
+
+        /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
+        pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;
     }
     else
     {
-        /* Restore force-flags that may or may not have been cleared as part of the failed VM-entry. */
-        iemVmxVmexitRestoreForceFlags(pVCpu);
+        /* Restore the NMI-blocking state if VM-entry failed due to invalid guest state or while loading MSRs. */
+        uint32_t const uExitReasonBasic = VMX_EXIT_REASON_BASIC(uExitReason);
+        if (   uExitReasonBasic == VMX_EXIT_ERR_INVALID_GUEST_STATE
+            || uExitReasonBasic == VMX_EXIT_ERR_MSR_LOAD)
+            iemVmxVmexitRestoreNmiBlockingFF(pVCpu);
     }

     
@@ -2864 +2868 @@
     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;

-    /* Revert any IEM-only nested-guest execution policy if any. */
-    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VM-exit");
-    return rcStrict;
+#ifdef IN_RING3
+    LogRel(("vmexit: uExitReason=%s\n", HMR3GetVmxExitName(uExitReason)));
+#endif
+
+    /* Revert any IEM-only nested-guest execution policy if it was set earlier, otherwise return rcStrict. */
+    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(pVCpu, "VM-exit", rcStrict);
 # endif
 }
     
@@ -7248 +7255 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
     
@@ -7256 +7265 @@

     /* Current VMCS valid. */
-    if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
     
@@ -7335 +7346 @@
                    /*
                     * Blocking of NMIs need to be restored if VM-entry fails due to invalid-guest state.
-                    * So we save the required force flags here (currently only VMCPU_FF_BLOCK_NMI) so we
-                    * can restore it on VM-exit when required.
+                    * So we save the the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
+                    * VM-exit when required.
+                    * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
                     */
-                    iemVmxVmentrySaveForceFlags(pVCpu);
+                    iemVmxVmentrySaveNmiBlockingFF(pVCpu);

                    rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
     
@@ -7400 +7412 @@
                                iemVmxVmentrySetupMtf(pVCpu, pszInstr);

-                                /* Now that we've switched page tables, we can inject events if any. */
-                                iemVmxVmentryInjectEvent(pVCpu, pszInstr);
-
-                                /*
-                                 * We've successfully entered nested-guest execution at this point.
-                                 * Return after setting nested-guest EM execution policy as necessary.
-                                 */
-                                IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr);
+                                /* Now that we've switched page tables, we can go ahead and inject any event. */
+                                rcStrict = iemVmxVmentryInjectEvent(pVCpu, pszInstr);
+                                if (RT_SUCCESS(rcStrict))
+                                {
+                                    /* Reschedule to IEM-only execution of the nested-guest or return VINF_SUCCESS. */
+                                    IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr, VINF_SUCCESS);
+                                }
+
+                                Log(("%s: VM-entry event injection failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+                                return rcStrict;
                            }
                            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
     
@@ -7544 +7558 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -7574 +7590 @@

     /* Supported VMCS field. */
-    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    if (iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    { /* likely */ }
+    else
     {
         Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
     
@@ -7763 +7781 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -7821 +7841 @@

     /* Supported VMCS field. */
-    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    if (iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
     
@@ -7912 +7934 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -8031 +8055 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -8081 +8107 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -8236 +8264 @@
     {
         /* CPL. */
-        if (pVCpu->iem.s.uCpl > 0)
+        if (pVCpu->iem.s.uCpl == 0)
+        { /* likely */ }
+        else
         {
             Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     
@@ -8244 +8274 @@

         /* A20M (A20 Masked) mode. */
-        if (!PGMPhysIsA20Enabled(pVCpu))
+        if (PGMPhysIsA20Enabled(pVCpu))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: A20M mode -> #GP(0)\n"));
     
@@ -8430 +8462 @@

     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));