Changeset 86183 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Sep 20, 2020 11:58:23 AM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
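In short: the revision threads a new per-VCPU flag, fEmulateLongModeSysEnterExit, through the SVM code. When it is set, the #UD intercept is enabled alongside the GIM one, the SYSENTER_CS/ESP/EIP MSRs switch from pass-through to read/write intercepts, and the VMCB copies of those MSRs are no longer imported into the guest context (AMD-V keeps them as 32-bit values and would clear the high halves of EIP and ESP). The bulk of the change is in the #UD exit handler, hmR0SvmExitXcptUD, which for long-mode guests fetches the faulting opcode bytes, filters for sysenter/sysexit encodings, and hands those to IEM for emulation; AMD CPUs raise #UD for SYSENTER/SYSEXIT in long mode, while Intel CPUs support them there.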
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r85806)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r86183)
@@ -990,5 +990,5 @@
 
     /* Apply the exceptions intercepts needed by the GIM provider. */
-    if (pVCpu0->hm.s.fGIMTrapXcptUD)
+    if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
         pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
 
@@ -1104,6 +1104,15 @@
     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
+    {
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    }
+    else
+    {
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+    }
     pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hm.s.svm.HCPhysMsrBitmap;
@@ -2102,5 +2111,5 @@
     {
         /* Trap #UD for GIM provider (e.g. for hypercalls). */
-        if (pVCpu->hm.s.fGIMTrapXcptUD)
+        if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
             hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
         else
@@ -2743,5 +2752,6 @@
     }
 
-    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+    if (   (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+        && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
     {
         pCtx->SysEnter.cs = pVmcbGuest->u64SysEnterCS;
@@ -7248,34 +7258,89 @@
     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
 
-    int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
+    /** @todo if we accumulate more optional stuff here, we ought to combine the
+     *        reading of opcode bytes to avoid doing more than once. */
+
+    VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
     if (pVCpu->hm.s.fGIMTrapXcptUD)
     {
         HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
         uint8_t cbInstr = 0;
-        VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
+        rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
         if (rcStrict == VINF_SUCCESS)
         {
             /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
             hmR0SvmAdvanceRip(pVCpu, cbInstr);
-            rc = VINF_SUCCESS;
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
+            rcStrict = VINF_SUCCESS;
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
         }
         else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
-            rc = VINF_SUCCESS;
+            rcStrict = VINF_SUCCESS;
         else if (rcStrict == VINF_GIM_R3_HYPERCALL)
-            rc = VINF_GIM_R3_HYPERCALL;
+            rcStrict = VINF_GIM_R3_HYPERCALL;
         else
+        {
             Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
+            rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
+        }
+    }
+
+    if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
+    {
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
+                                        | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
+        {
+            /* Ideally, IEM should just handle all these special #UD situations, but
+               we don't quite trust things to behave optimially when doing that. So,
+               for now we'll restrict ourselves to a handful of possible sysenter and
+               sysexit encodings that we filter right here. */
+            uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
+            uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
+            uint32_t const uCpl       = CPUMGetGuestCPL(pVCpu);
+            uint8_t const  cbMin      = uCpl != 0 ? 2 : 1 + 2;
+            RTGCPTR const  GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+            if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
+            {
+                cbInstr = cbMin;
+                int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
+                AssertRCStmt(rc2, cbInstr = 0);
+            }
+            else
+                memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
+            if (   cbInstr == 0 /* read error */
+                || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
+                || (   uCpl == 0
+                    && (   (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
+                        || (   cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35 /* rex.w sysexit */
+                            && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
+            {
+                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
+                                                | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
+                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
+                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr, abInstr, cbInstr);
+                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
+                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
+                      pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
+                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
+                if (rcStrict == VINF_IEM_RAISED_XCPT)
+                    rcStrict = VINF_SUCCESS;
+                return VBOXSTRICTRC_TODO(rcStrict);
+            }
+            Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
+        }
+        else
+            Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     }
 
     /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
-    if (RT_FAILURE(rc))
+    if (RT_FAILURE(rcStrict))
    {
         hmR0SvmSetPendingXcptUD(pVCpu);
-        rc = VINF_SUCCESS;
+        rcStrict = VINF_SUCCESS;
     }
 
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
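The core of the new exit handling is the opcode filter: rather than letting IEM decode arbitrary faulting instructions, the handler only accepts the three encodings it intends to emulate. Below is a minimal standalone sketch of that predicate; the function name and signature are mine, not VirtualBox's, and MY_REX_W stands in for the real X86_OP_REX_W (0x48):

#include <stdbool.h>
#include <stdint.h>

#define MY_REX_W 0x48  /* REX prefix with W=1; bits R/X/B may vary, hence masking with (MY_REX_W | 0xf0). */

/* Hypothetical mirror of the filter in hmR0SvmExitXcptUD: accept "0f 34"
   (sysenter) at any CPL, and "0f 35" / "rex.w 0f 35" (sysexit) at CPL 0
   only; everything else keeps raising #UD in the guest. */
static bool isSysenterSysexitEncoding(const uint8_t *pb, uint8_t cb, uint32_t uCpl)
{
    if (cb >= 2 && pb[0] == 0x0f && pb[1] == 0x34)          /* sysenter */
        return true;
    if (uCpl == 0)
    {
        if (cb >= 2 && pb[0] == 0x0f && pb[1] == 0x35)      /* sysexit */
            return true;
        if (   cb >= 3 && pb[1] == 0x0f && pb[2] == 0x35    /* rex.w sysexit */
            && (pb[0] & (MY_REX_W | 0xf0)) == MY_REX_W)
            return true;
    }
    return false;
}

This also explains the cbMin computation in the patch: at CPL > 0 only the two-byte sysenter can match, so two bytes suffice, while at CPL 0 the handler must fetch 1 + 2 bytes to catch a REX.W-prefixed sysexit.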
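hmR0SvmSetMsrPermission itself is not part of the diff. For readers unfamiliar with SVM, here is a sketch of what such a helper has to do, assuming the MSR permission map layout from the AMD APM vol. 2: two bits per MSR (even bit intercepts reads, odd bit writes, 1 = intercept) over three 0x2000-MSR ranges packed into an 8 KB bitmap. The function below is illustrative only, not the VirtualBox implementation:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: set/clear the read and write intercept bits for one
   MSR in an SVM MSR permission bitmap (8 KB, caller-allocated). Returns
   false for MSRs outside the mapped ranges, which always intercept. */
static bool svmMsrpmSetIntercept(uint8_t *pbMsrpm, uint32_t idMsr, bool fInterceptRd, bool fInterceptWr)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0x0000;                        /* MSRs 0x00000000..0x00001fff */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBase = 0x0800;                        /* MSRs 0xc0000000..0xc0001fff */
        idMsr  -= UINT32_C(0xc0000000);
    }
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
    {
        offBase = 0x1000;                        /* MSRs 0xc0010000..0xc0011fff */
        idMsr  -= UINT32_C(0xc0010000);
    }
    else
        return false;

    uint32_t const idxBit = idMsr * 2;           /* two bits per MSR */
    uint8_t const  fMask  = (uint8_t)(3u << (idxBit % 8));
    uint8_t        fBits  = 0;
    if (fInterceptRd)
        fBits |= (uint8_t)(1u << (idxBit % 8));
    if (fInterceptWr)
        fBits |= (uint8_t)(2u << (idxBit % 8));
    pbMsrpm[offBase + idxBit / 8] = (uint8_t)((pbMsrpm[offBase + idxBit / 8] & ~fMask) | fBits);
    return true;
}

In the spirit of the patch, flipping MSR_IA32_SYSENTER_CS (0x174) from pass-through to intercept would then amount to svmMsrpmSetIntercept(pbMsrBitmap, 0x174, true, true).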