VirtualBox

Changeset 86183 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Sep 20, 2020 11:58:23 AM
Author: vboxsync
Message:

VMM: Implemented sysenter and sysexit in IEM (limited testing). Added a long-mode emulation of sysenter/sysexit to SVM.
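In short, the SVM side of this change works around the fact that AMD CPUs raise #UD for sysenter/sysexit in long mode (Intel CPUs support them there): when the new fEmulateLongModeSysEnterExit mode is active, the #UD intercept catches the instruction and hands it to IEM for emulation. As a minimal sketch of the encoding filter the new #UD exit handler applies (see the last hunk below) — the helper name here is invented, and 0x48 stands in for VirtualBox's X86_OP_REX_W, a REX prefix byte with the W bit set:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: does the fetched byte stream match one of the sysenter/sysexit
   encodings the handler is willing to forward to the instruction emulator? */
static bool sketchIsSysEnterOrSysExit(uint8_t const *pab, uint8_t cb, uint32_t uCpl)
{
    /* 0F 34: sysenter -- forwarded at any CPL. */
    if (cb >= 2 && pab[0] == 0x0f && pab[1] == 0x34)
        return true;
    if (uCpl == 0) /* sysexit is only forwarded when the guest is in ring 0 */
    {
        /* 0F 35: sysexit. */
        if (cb >= 2 && pab[0] == 0x0f && pab[1] == 0x35)
            return true;
        /* REX.W 0F 35: 64-bit sysexit.  (pab[0] & 0xf8) == 0x48 accepts any
           REX prefix with W=1 (bytes 0x48..0x4f) and nothing else. */
        if (cb >= 3 && (pab[0] & 0xf8) == 0x48 && pab[1] == 0x0f && pab[2] == 0x35)
            return true;
    }
    return false;
}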

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 85806)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 86183)
@@ -990,5 +990,5 @@
 
     /* Apply the exceptions intercepts needed by the GIM provider. */
-    if (pVCpu0->hm.s.fGIMTrapXcptUD)
+    if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
         pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
 
@@ -1104,7 +1104,16 @@
     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
+    {
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+    }
+    else
+    {
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
+    }
     pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hm.s.svm.HCPhysMsrBitmap;
 
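The hmR0SvmSetMsrPermission calls above flip per-MSR intercept bits in the SVM MSR permission map whose physical address is loaded into u64MSRPMPhysAddr. As background, here is a conceptual sketch of the hardware layout described in the AMD APM vol. 2 — this is not VirtualBox's hmR0SvmSetMsrPermission, and the function name is invented. The map dedicates two adjacent bits to each MSR (the low one intercepting RDMSR, the high one WRMSR) across three 2 KB regions:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: set the RDMSR/WRMSR intercept bits for one MSR in an SVM MSR
   permission map. */
static void sketchSetMsrIntercept(uint8_t *pbMsrpm, uint32_t idMsr, bool fInterceptRd, bool fInterceptWr)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0x0000;   /* MSRs 0x00000000..0x00001fff */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBase = 0x0800;   /* MSRs 0xc0000000..0xc0001fff */
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        offBase = 0x1000;   /* MSRs 0xc0010000..0xc0011fff */
    else
        return;             /* MSRs outside the three ranges are always intercepted. */

    uint32_t const iBit    = (idMsr & UINT32_C(0x1fff)) * 2; /* two bits per MSR */
    uint32_t const offByte = offBase + iBit / 8;
    unsigned const iShift  = iBit % 8;                       /* 0, 2, 4 or 6 */
    pbMsrpm[offByte] = (uint8_t)(  (pbMsrpm[offByte] & ~(3u << iShift))
                                 | ((fInterceptRd ? 1u : 0u) << iShift)
                                 | ((fInterceptWr ? 2u : 0u) << iShift));
}

Roughly speaking, SVMMSREXIT_PASSTHRU_* corresponds to clearing an MSR's bit pair and SVMMSREXIT_INTERCEPT_* to setting it, which is why the emulation mode switches the SYSENTER MSRs from passthrough to intercepted above.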
@@ -2102,5 +2111,5 @@
     {
         /* Trap #UD for GIM provider (e.g. for hypercalls). */
-        if (pVCpu->hm.s.fGIMTrapXcptUD)
+        if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
             hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
         else
@@ -2743,5 +2752,6 @@
         }
 
-        if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+        if (   (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
+            && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
         {
             pCtx->SysEnter.cs  = pVmcbGuest->u64SysEnterCS;
@@ -7248,34 +7258,89 @@
     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);
 
-    int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
+    /** @todo if we accumulate more optional stuff here, we ought to combine the
+     *        reading of opcode bytes to avoid doing more than once.  */
+
+    VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
     if (pVCpu->hm.s.fGIMTrapXcptUD)
     {
         HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
         uint8_t cbInstr = 0;
-        VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
+        rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
         if (rcStrict == VINF_SUCCESS)
         {
             /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
             hmR0SvmAdvanceRip(pVCpu, cbInstr);
-            rc = VINF_SUCCESS;
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
+            rcStrict = VINF_SUCCESS;
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
         }
         else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
-            rc = VINF_SUCCESS;
+            rcStrict = VINF_SUCCESS;
         else if (rcStrict == VINF_GIM_R3_HYPERCALL)
-            rc = VINF_GIM_R3_HYPERCALL;
+            rcStrict = VINF_GIM_R3_HYPERCALL;
         else
+        {
             Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
+            rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
+        }
+    }
+
+    if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
+    {
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
+                                        | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
+        {
+            /* Ideally, IEM should just handle all these special #UD situations, but
+               we don't quite trust things to behave optimially when doing that.  So,
+               for now we'll restrict ourselves to a handful of possible sysenter and
+               sysexit encodings that we filter right here. */
+            uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
+            uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
+            uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
+            uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
+            RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+            if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
+            {
+                cbInstr = cbMin;
+                int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
+                AssertRCStmt(rc2, cbInstr = 0);
+            }
+            else
+                memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
+            if (   cbInstr == 0 /* read error */
+                || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
+                || (   uCpl == 0
+                    && (   (   cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
+                        || (   cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35  /* rex.w sysexit */
+                            && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
+            {
+                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
+                                                | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
+                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
+                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr, abInstr, cbInstr);
+                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
+                      VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
+                      pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
+                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
+                if (rcStrict == VINF_IEM_RAISED_XCPT)
+                    rcStrict = VINF_SUCCESS;
+                return VBOXSTRICTRC_TODO(rcStrict);
+            }
+            Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
+        }
+        else
+            Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }
 
     /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
-    if (RT_FAILURE(rc))
+    if (RT_FAILURE(rcStrict))
     {
         hmR0SvmSetPendingXcptUD(pVCpu);
-        rc = VINF_SUCCESS;
+        rcStrict = VINF_SUCCESS;
     }
 
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
 