Changeset 76850 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jan 17, 2019 11:23:47 AM
- svn:sync-xref-src-repo-rev: 128209
- Files: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r76837 → r76850

  /** Enables/disables IEM-only EM execution policy in and from ring-3. */
  # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) \
+# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) \
     do { \
         Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
…
     } while (0)
 
-# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) \
+# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) \
     do { \
         Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
-        EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
+        return EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
     } while (0)
  # else
-# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix) do { return VINF_SUCCESS; } while (0)
-# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(a_pVCpu, a_pszLogPrefix) do {} while (0)
+# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) do { return (a_rcRet); } while (0)
+# define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcRet) do { return (a_rcRet); } while (0)
  # endif
 
…
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
+IEM_STATIC void iemVmxVmentrySaveNmiBlockingFF(PVMCPU pVCpu)
 {
     /* We shouldn't be called multiple times during VM-entry. */
…
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
+IEM_STATIC void iemVmxVmexitRestoreNmiBlockingFF(PVMCPU pVCpu)
 {
     if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
…
     RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
     bool const fUnusable = RT_BOOL(HostSel == 0);
+    PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
 
     /* Selector. */
-    pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
-    pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
-    pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
+    pSelReg->Sel = HostSel;
+    pSelReg->ValidSel = HostSel;
+    pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
 
     /* Limit. */
-    pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
+    pSelReg->u32Limit = 0xffffffff;
 
     /* Base. */
-    pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
+    pSelReg->u64Base = 0;
 
     /* Attributes. */
     if (iSegReg == X86_SREG_CS)
     {
-        pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
-        pVCpu->cpum.GstCtx.ss.Attr.n.u1DescType = 1;
-        pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
-        pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
-        pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
-        pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
-        pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
-        Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
+        pSelReg->Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
+        pSelReg->Attr.n.u1DescType = 1;
+        pSelReg->Attr.n.u2Dpl = 0;
+        pSelReg->Attr.n.u1Present = 1;
+        pSelReg->Attr.n.u1Long = fHostInLongMode;
+        pSelReg->Attr.n.u1DefBig = !fHostInLongMode;
+        pSelReg->Attr.n.u1Granularity = 1;
+        Assert(!pSelReg->Attr.n.u1Unusable);
         Assert(!fUnusable);
     }
     else
     {
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
-        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
+        pSelReg->Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
+        pSelReg->Attr.n.u1DescType = 1;
+        pSelReg->Attr.n.u2Dpl = 0;
+        pSelReg->Attr.n.u1Present = 1;
+        pSelReg->Attr.n.u1DefBig = 1;
+        pSelReg->Attr.n.u1Granularity = 1;
+        pSelReg->Attr.n.u1Unusable = fUnusable;
     }
 }
…
     pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
 
-    /* LDTR. */
+    /* LDTR (Warning! do not touch the base and limits here). */
     pVCpu->cpum.GstCtx.ldtr.Sel = 0;
     pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
     pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
-    pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
-    pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
-    pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
+    pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
 
     /* GDTR. */
…
     Assert(pVmcs);
 
+    /* Update the VM-exit reason, the other relevant data fields are expected to be updated by the caller already. */
     pVmcs->u32RoExitReason = uExitReason;
     Log3(("vmexit: uExitReason=%#RX32 uExitQual=%#RX64\n", uExitReason, pVmcs->u64RoExitQual));
…
      * occurs in enclave mode/SMM which we don't support yet.
      *
-     * If we ever add support for it, we can pass just the lower bits, till then an assert
-     * should suffice.
+     * If we ever add support for it, we can pass just the lower bits to the functions
+     * below, till then an assert should suffice.
      */
     Assert(!RT_HI_U16(uExitReason));
 
+    /* Save the guest state into the VMCS and restore guest MSRs from the auto-store guest MSR area. */
     iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
     int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
…
         { /* likely */ }
         else
-        {
-            IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VMX-Abort");
             return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
-        }
+
+        /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
+        pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;
     }
     else
     {
-        /* Restore force-flags that may or may not have been cleared as part of the failed VM-entry. */
-        iemVmxVmexitRestoreForceFlags(pVCpu);
+        /* Restore the NMI-blocking state if VM-entry failed due to invalid guest state or while loading MSRs. */
+        uint32_t const uExitReasonBasic = VMX_EXIT_REASON_BASIC(uExitReason);
+        if (   uExitReasonBasic == VMX_EXIT_ERR_INVALID_GUEST_STATE
+            || uExitReasonBasic == VMX_EXIT_ERR_MSR_LOAD)
+            iemVmxVmexitRestoreNmiBlockingFF(pVCpu);
     }
 
…
     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
 
-    /* Revert any IEM-only nested-guest execution policy if any. */
-    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE(pVCpu, "VM-exit");
-    return rcStrict;
+#ifdef IN_RING3
+    LogRel(("vmexit: uExitReason=%s\n", HMR3GetVmxExitName(uExitReason)));
+#endif
+
+    /* Revert any IEM-only nested-guest execution policy if it was set earlier, otherwise return rcStrict. */
+    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(pVCpu, "VM-exit", rcStrict);
 # endif
 }
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
…
 
     /* Current VMCS valid. */
-    if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
…
         /*
          * Blocking of NMIs need to be restored if VM-entry fails due to invalid-guest state.
-         * So we save the required force flags here (currently only VMCPU_FF_BLOCK_NMI) so we
-         * can restore it on VM-exit when required.
+         * So we save the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
+         * VM-exit when required.
+         * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
          */
-        iemVmxVmentrySaveForceFlags(pVCpu);
+        iemVmxVmentrySaveNmiBlockingFF(pVCpu);
 
         rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
…
             iemVmxVmentrySetupMtf(pVCpu, pszInstr);
 
-            /* Now that we've switched page tables, we can inject events if any. */
-            iemVmxVmentryInjectEvent(pVCpu, pszInstr);
-
-            /*
-             * We've successfully entered nested-guest execution at this point.
-             * Return after setting nested-guest EM execution policy as necessary.
-             */
-            IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr);
+            /* Now that we've switched page tables, we can go ahead and inject any event. */
+            rcStrict = iemVmxVmentryInjectEvent(pVCpu, pszInstr);
+            if (RT_SUCCESS(rcStrict))
+            {
+                /* Reschedule to IEM-only execution of the nested-guest or return VINF_SUCCESS. */
+                IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr, VINF_SUCCESS);
+            }
+
+            Log(("%s: VM-entry event injection failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
         }
         return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
 
     /* Supported VMCS field. */
-    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    if (iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    { /* likely */ }
+    else
     {
         Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
 
     /* Supported VMCS field. */
-    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    if (iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
     {
         /* CPL. */
-        if (pVCpu->iem.s.uCpl > 0)
+        if (pVCpu->iem.s.uCpl == 0)
+        { /* likely */ }
+        else
         {
             Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
…
 
         /* A20M (A20 Masked) mode. */
-        if (!PGMPhysIsA20Enabled(pVCpu))
+        if (PGMPhysIsA20Enabled(pVCpu))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: A20M mode -> #GP(0)\n"));
…
 
     /* CPL. */
-    if (pVCpu->iem.s.uCpl > 0)
+    if (pVCpu->iem.s.uCpl == 0)
+    { /* likely */ }
+    else
     {
         Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));