VirtualBox

Changeset 86183 in vbox for trunk/src


Timestamp:
Sep 20, 2020 11:58:23 AM
Author:
vboxsync
Message:

VMM: Implemented sysenter and sysexit in IEM (limited testing). Added a long-mode emulation of sysenter/sysexit to SVM.
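
For background: Intel CPUs keep SYSENTER/SYSEXIT valid in long mode, whereas AMD CPUs raise #UD for them there and also implement the SYSENTER_EIP/ESP MSRs as 32-bit registers (the SVM hunks below note that "AMD-V would clear the high 32 bits of EIP & ESP"). A toy illustration of that MSR truncation, which the intercepts added here work around (hypothetical values, not VirtualBox code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 64-bit entry point an Intel-profiled guest might write to IA32_SYSENTER_EIP. */
        uint64_t const uWritten      = UINT64_C(0xffffffff81800000);
        /* On AMD hardware only the low 32 bits survive the WRMSR. */
        uint64_t const uAmdReadsBack = (uint32_t)uWritten;
        printf("written %#" PRIx64 ", read back on AMD: %#" PRIx64 "\n", uWritten, uAmdReadsBack);
        return 0;
    }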

Location:
trunk/src/VBox/VMM
Files:
5 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r84478 r86183  
    42174217    /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
    42184218     *        on sysret. */
     4219
     4220    /* Flush the prefetch buffer. */
     4221#ifdef IEM_WITH_CODE_TLB
     4222    pVCpu->iem.s.pbInstrBuf = NULL;
     4223#else
     4224    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
     4225#endif
     4226
     4227    return VINF_SUCCESS;
     4228}
     4229
     4230
     4231/**
     4232 * Implements SYSENTER (Intel, 32-bit AMD).
     4233 */
     4234IEM_CIMPL_DEF_0(iemCImpl_sysenter)
     4235{
     4236    RT_NOREF(cbInstr);
     4237
     4238    /*
     4239     * Check preconditions.
     4240     *
     4241     * Note that CPUs described in the documentation may load slightly
     4242     * different values into CS and SS than we allow here.  This has yet
     4243     * to be checked on real hardware.
     4244     */
     4245    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
     4246    {
     4247        Log(("sysenter: not supported -> #UD\n"));
     4248        return iemRaiseUndefinedOpcode(pVCpu);
     4249    }
     4250    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
     4251    {
     4252        Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
     4253        return iemRaiseGeneralProtectionFault0(pVCpu);
     4254    }
     4255    bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
     4256    if (IEM_IS_GUEST_CPU_AMD(pVCpu) && !fIsLongMode)
     4257    {
     4258        Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
     4259        return iemRaiseUndefinedOpcode(pVCpu);
     4260    }
     4261    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
     4262    uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
     4263    if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
     4264    {
     4265        Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
     4266        return iemRaiseGeneralProtectionFault0(pVCpu);
     4267    }
     4268
     4269    /* This test isn't in the docs, it's just a safeguard against missing
     4270       canonical checks when writing the registers. */
     4271    if (RT_LIKELY(   !fIsLongMode
     4272                  || (   IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
     4273                      && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
     4274    { /* likely */ }
     4275    else
     4276    {
     4277        Log(("sysenter: SYSENTER_EIP = %#RX64 and/or SYSENTER_ESP = %#RX64 not canonical -> #UD\n",
     4278             pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
     4279        return iemRaiseUndefinedOpcode(pVCpu);
     4280    }
     4281
     4282/** @todo Test: Sysenter from ring-0, ring-1 and ring-2.  */
     4283
     4284    /*
     4285     * Update registers and commit.
     4286     */
     4287    if (fIsLongMode)
     4288    {
     4289        Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     4290             pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
     4291        pVCpu->cpum.GstCtx.rip          = pVCpu->cpum.GstCtx.SysEnter.eip;
     4292        pVCpu->cpum.GstCtx.rsp          = pVCpu->cpum.GstCtx.SysEnter.esp;
     4293        pVCpu->cpum.GstCtx.cs.Attr.u    = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4294                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
     4295    }
     4296    else
     4297    {
     4298        Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
     4299             pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
     4300        pVCpu->cpum.GstCtx.rip          = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
     4301        pVCpu->cpum.GstCtx.rsp          = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
     4302        pVCpu->cpum.GstCtx.cs.Attr.u    = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4303                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
     4304    }
     4305    pVCpu->cpum.GstCtx.cs.Sel           = uNewCs & X86_SEL_MASK_OFF_RPL;
     4306    pVCpu->cpum.GstCtx.cs.ValidSel      = uNewCs & X86_SEL_MASK_OFF_RPL;
     4307    pVCpu->cpum.GstCtx.cs.u64Base       = 0;
     4308    pVCpu->cpum.GstCtx.cs.u32Limit      = UINT32_MAX;
     4309    pVCpu->cpum.GstCtx.cs.fFlags        = CPUMSELREG_FLAGS_VALID;
     4310
     4311    pVCpu->cpum.GstCtx.ss.Sel           = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
     4312    pVCpu->cpum.GstCtx.ss.ValidSel      = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
     4313    pVCpu->cpum.GstCtx.ss.u64Base       = 0;
     4314    pVCpu->cpum.GstCtx.ss.u32Limit      = UINT32_MAX;
     4315    pVCpu->cpum.GstCtx.ss.Attr.u        = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4316                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
     4317    pVCpu->cpum.GstCtx.ss.fFlags        = CPUMSELREG_FLAGS_VALID;
     4318
     4319    pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
     4320    pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
     4321    pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
     4322
     4323    pVCpu->iem.s.uCpl                   = 0;
     4324
     4325    /* Flush the prefetch buffer. */
     4326#ifdef IEM_WITH_CODE_TLB
     4327    pVCpu->iem.s.pbInstrBuf = NULL;
     4328#else
     4329    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
     4330#endif
     4331
     4332    return VINF_SUCCESS;
     4333}
     4334
     4335
     4336/**
     4337 * Implements SYSEXIT (Intel, 32-bit AMD).
     4338 *
     4339 * @param   enmEffOpSize    The effective operand size.
     4340 */
     4341IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
     4342{
     4343    RT_NOREF(cbInstr);
     4344
     4345    /*
     4346     * Check preconditions.
     4347     *
     4348     * Note that CPUs described in the documentation may load slightly
     4349     * different values into CS and SS than we allow here.  This has yet
     4350     * to be checked on real hardware.
     4351     */
     4352    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
     4353    {
     4354        Log(("sysexit: not supported -> #UD\n"));
     4355        return iemRaiseUndefinedOpcode(pVCpu);
     4356    }
     4357    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
     4358    {
     4359        Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
     4360        return iemRaiseGeneralProtectionFault0(pVCpu);
     4361    }
     4362    bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
     4363    if (IEM_IS_GUEST_CPU_AMD(pVCpu) && !fIsLongMode)
     4364    {
     4365        Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
     4366        return iemRaiseUndefinedOpcode(pVCpu);
     4367    }
     4368    if (pVCpu->iem.s.uCpl != 0)
     4369    {
     4370        Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", pVCpu->iem.s.uCpl));
     4371        return iemRaiseGeneralProtectionFault0(pVCpu);
     4372    }
     4373    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
     4374    uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
     4375    if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
     4376    {
     4377        Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
     4378        return iemRaiseGeneralProtectionFault0(pVCpu);
     4379    }
     4380
     4381    /*
     4382     * Update registers and commit.
     4383     */
     4384    if (enmEffOpSize == IEMMODE_64BIT)
     4385    {
     4386        Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     4387             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rdx));
     4388        pVCpu->cpum.GstCtx.rip          = pVCpu->cpum.GstCtx.rdx;
     4389        pVCpu->cpum.GstCtx.rsp          = pVCpu->cpum.GstCtx.rcx;
     4390        pVCpu->cpum.GstCtx.cs.Attr.u    = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4391                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
     4392        pVCpu->cpum.GstCtx.cs.Sel       = (uNewCs | 3) + 32;
     4393        pVCpu->cpum.GstCtx.cs.ValidSel  = (uNewCs | 3) + 32;
     4394        pVCpu->cpum.GstCtx.ss.Sel       = (uNewCs | 3) + 40;
     4395        pVCpu->cpum.GstCtx.ss.ValidSel  = (uNewCs | 3) + 40;
     4396    }
     4397    else
     4398    {
     4399        Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     4400             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
     4401        pVCpu->cpum.GstCtx.rip          = pVCpu->cpum.GstCtx.edx;
     4402        pVCpu->cpum.GstCtx.rsp          = pVCpu->cpum.GstCtx.ecx;
     4403        pVCpu->cpum.GstCtx.cs.Attr.u    = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4404                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
     4405        pVCpu->cpum.GstCtx.cs.Sel       = (uNewCs | 3) + 16;
     4406        pVCpu->cpum.GstCtx.cs.ValidSel  = (uNewCs | 3) + 16;
     4407        pVCpu->cpum.GstCtx.ss.Sel       = (uNewCs | 3) + 24;
     4408        pVCpu->cpum.GstCtx.ss.ValidSel  = (uNewCs | 3) + 24;
     4409    }
     4410    pVCpu->cpum.GstCtx.cs.u64Base       = 0;
     4411    pVCpu->cpum.GstCtx.cs.u32Limit      = UINT32_MAX;
     4412    pVCpu->cpum.GstCtx.cs.fFlags        = CPUMSELREG_FLAGS_VALID;
     4413
     4414    pVCpu->cpum.GstCtx.ss.u64Base       = 0;
     4415    pVCpu->cpum.GstCtx.ss.u32Limit      = UINT32_MAX;
     4416    pVCpu->cpum.GstCtx.ss.Attr.u        = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
     4417                                        | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
     4418    pVCpu->cpum.GstCtx.ss.fFlags        = CPUMSELREG_FLAGS_VALID;
     4419    pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
     4420
     4421    pVCpu->iem.s.uCpl                   = 3;
    42194422
    42204423    /* Flush the prefetch buffer. */
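
To make the selector arithmetic in the two functions above concrete: per the Intel SDM, SYSENTER derives a flat ring-0 CS/SS pair from IA32_SYSENTER_CS, and SYSEXIT derives a ring-3 pair at fixed offsets above it. A standalone sketch of that math (hypothetical helpers, not VirtualBox code); note that the mask must be parenthesized before the +8, since '&' binds more loosely than '+' in C:

    #include <stdint.h>
    #include <stdio.h>

    #define SEL_MASK_OFF_RPL  0xfffc  /* strip the RPL bits, like X86_SEL_MASK_OFF_RPL */

    /* SYSENTER: CS = MSR & ~3 (forced to ring 0), SS = CS + 8. */
    static void sysenterSelectors(uint16_t uMsrCs, uint16_t *puCs, uint16_t *puSs)
    {
        *puCs = (uint16_t)(uMsrCs & SEL_MASK_OFF_RPL);
        *puSs = (uint16_t)((uMsrCs & SEL_MASK_OFF_RPL) + 8);  /* parentheses required */
    }

    /* SYSEXIT: CS = MSR + 32 (64-bit form) or + 16 (32-bit form), RPL forced
       to 3; SS is the next descriptor, CS + 8. */
    static void sysexitSelectors(uint16_t uMsrCs, int f64Bit, uint16_t *puCs, uint16_t *puSs)
    {
        uint16_t const off = f64Bit ? 32 : 16;
        *puCs = (uint16_t)((uMsrCs + off) | 3);
        *puSs = (uint16_t)((uMsrCs + off + 8) | 3);
    }

    int main(void)
    {
        uint16_t uCs, uSs;
        sysenterSelectors(0x0010, &uCs, &uSs);
        printf("sysenter:  CS=%#06x SS=%#06x\n", uCs, uSs);   /* CS=0x0010 SS=0x0018 */
        sysexitSelectors(0x0010, 1 /*64-bit*/, &uCs, &uSs);
        printf("sysexit64: CS=%#06x SS=%#06x\n", uCs, uSs);   /* CS=0x0033 SS=0x003b */
        return 0;
    }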
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r84477 r86183  
    26112611
    26122612/** Opcode 0x0f 0x34. */
    2613 FNIEMOP_STUB(iemOp_sysenter);
     2613FNIEMOP_DEF(iemOp_sysenter)
     2614{
     2615    IEMOP_MNEMONIC0(FIXED, SYSENTER, sysenter, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
     2616    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     2617    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysenter);
     2618}
     2619
    26142620/** Opcode 0x0f 0x35. */
    2615 FNIEMOP_STUB(iemOp_sysexit);
     2621FNIEMOP_DEF(iemOp_sysexit)
     2622{
     2623    IEMOP_MNEMONIC0(FIXED, SYSEXIT, sysexit, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
     2624    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
     2625    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_sysexit, pVCpu->iem.s.enmEffOpSize);
     2626}
     2627
    26162628/** Opcode 0x0f 0x37. */
    26172629FNIEMOP_STUB(iemOp_getsec);
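
The operand size passed to iemCImpl_sysexit above matters because sysexit defaults to a 32-bit operand size even in long mode; only a REX.W prefix makes it restore the full RIP/RSP from RDX/RCX. A simplified standalone decode of that distinction (hypothetical helper; the real decoder tracks prefixes in pVCpu->iem.s.enmEffOpSize):

    #include <stdint.h>

    typedef enum { OPSIZE_32BIT, OPSIZE_64BIT } OPSIZE;

    /* Effective operand size selected by a sysexit encoding: 0f 35 is the
       32-bit form; a preceding REX prefix with the W bit set (0x48..0x4f)
       selects the 64-bit form. */
    static OPSIZE sysexitEffOpSize(const uint8_t *pb, unsigned cb)
    {
        if (cb >= 3 && (pb[0] & 0xf8) == 0x48 && pb[1] == 0x0f && pb[2] == 0x35)
            return OPSIZE_64BIT;
        return OPSIZE_32BIT;
    }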
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r85806 r86183  
    990990
     991991    /* Apply the exception intercepts needed by the GIM provider. */
    992     if (pVCpu0->hm.s.fGIMTrapXcptUD)
     992    if (pVCpu0->hm.s.fGIMTrapXcptUD || pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
    993993        pVmcbCtrl0->u32InterceptXcpt |= RT_BIT(X86_XCPT_UD);
    994994
     
    11041104    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    11051105    hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1106     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1107     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1108     hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1106    if (!pVCpu0->hm.s.svm.fEmulateLongModeSysEnterExit)
     1107    {
     1108        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1109        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1110        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1111    }
     1112    else
     1113    {
     1114        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1115        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1116        hmR0SvmSetMsrPermission(pVCpu0, pbMsrBitmap0, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1117    }
    11091118    pVmcbCtrl0->u64MSRPMPhysAddr = pVCpu0->hm.s.svm.HCPhysMsrBitmap;
    11101119
     
    21022111    {
    21032112        /* Trap #UD for GIM provider (e.g. for hypercalls). */
    2104         if (pVCpu->hm.s.fGIMTrapXcptUD)
     2113        if (pVCpu->hm.s.fGIMTrapXcptUD || pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
    21052114            hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD);
    21062115        else
     
    27432752        }
    27442753
    2745         if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     2754        if (   (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     2755            && !pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit /* Intercepted. AMD-V would clear the high 32 bits of EIP & ESP. */)
    27462756        {
    27472757            pCtx->SysEnter.cs  = pVmcbGuest->u64SysEnterCS;
     
    72487258    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);
    72497259
    7250     int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
     7260    /** @todo if we accumulate more optional stuff here, we ought to combine the
      7261     *        reading of opcode bytes to avoid doing it more than once.  */
     7262
     7263    VBOXSTRICTRC rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
    72517264    if (pVCpu->hm.s.fGIMTrapXcptUD)
    72527265    {
    72537266        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
    72547267        uint8_t cbInstr = 0;
    7255         VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
     7268        rcStrict = GIMXcptUD(pVCpu, &pVCpu->cpum.GstCtx, NULL /* pDis */, &cbInstr);
    72567269        if (rcStrict == VINF_SUCCESS)
    72577270        {
    72587271            /* #UD #VMEXIT does not have valid NRIP information, manually advance RIP. See @bugref{7270#c170}. */
    72597272            hmR0SvmAdvanceRip(pVCpu, cbInstr);
    7260             rc = VINF_SUCCESS;
    7261             HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
     7273            rcStrict = VINF_SUCCESS;
     7274            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    72627275        }
    72637276        else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
    7264             rc = VINF_SUCCESS;
     7277            rcStrict = VINF_SUCCESS;
    72657278        else if (rcStrict == VINF_GIM_R3_HYPERCALL)
    7266             rc = VINF_GIM_R3_HYPERCALL;
     7279            rcStrict = VINF_GIM_R3_HYPERCALL;
    72677280        else
     7281        {
    72687282            Assert(RT_FAILURE(VBOXSTRICTRC_VAL(rcStrict)));
     7283            rcStrict = VERR_SVM_UNEXPECTED_XCPT_EXIT;
     7284        }
     7285    }
     7286
     7287    if (pVCpu->hm.s.svm.fEmulateLongModeSysEnterExit)
     7288    {
     7289        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
     7290                                        | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
     7291        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
     7292        {
     7293            /* Ideally, IEM should just handle all these special #UD situations, but
      7294               we don't quite trust things to behave optimally when doing that.  So,
     7295               for now we'll restrict ourselves to a handful of possible sysenter and
     7296               sysexit encodings that we filter right here. */
     7297            uint8_t abInstr[SVM_CTRL_GUEST_INSTR_BYTES_MAX];
     7298            uint8_t cbInstr = pVmcb->ctrl.cbInstrFetched;
     7299            uint32_t const uCpl = CPUMGetGuestCPL(pVCpu);
     7300            uint8_t const cbMin = uCpl != 0 ? 2 : 1 + 2;
     7301            RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
     7302            if (cbInstr < cbMin || cbInstr > SVM_CTRL_GUEST_INSTR_BYTES_MAX)
     7303            {
     7304                cbInstr = cbMin;
     7305                int rc2 = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, GCPtrInstr, cbInstr);
     7306                AssertRCStmt(rc2, cbInstr = 0);
     7307            }
     7308            else
     7309                memcpy(abInstr, pVmcb->ctrl.abInstr, cbInstr); /* unlikely */
     7310            if (   cbInstr == 0 /* read error */
     7311                || (cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x34) /* sysenter */
     7312                || (   uCpl == 0
     7313                    && (   (   cbInstr >= 2 && abInstr[0] == 0x0f && abInstr[1] == 0x35) /* sysexit */
     7314                        || (   cbInstr >= 3 && abInstr[1] == 0x0f && abInstr[2] == 0x35  /* rex.w sysexit */
     7315                            && (abInstr[0] & (X86_OP_REX_W | 0xf0)) == X86_OP_REX_W))))
     7316            {
     7317                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
     7318                                                | CPUMCTX_EXTRN_SREG_MASK /* without ES+DS+GS the app will #GP later - go figure */);
     7319                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
     7320                rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), GCPtrInstr, abInstr, cbInstr);
     7321                Log6(("hmR0SvmExitXcptUD: sysenter/sysexit: rcStrict=%Rrc %04x:%08RX64 %08RX64 %04x:%08RX64\n",
     7322                     VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u,
     7323                     pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp));
     7324                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
     7325                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK); /** @todo Lazy bird. */
     7326                if (rcStrict == VINF_IEM_RAISED_XCPT)
     7327                    rcStrict = VINF_SUCCESS;
     7328                return VBOXSTRICTRC_TODO(rcStrict);
     7329            }
     7330            Log6(("hmR0SvmExitXcptUD: not sysenter/sysexit: %.*Rhxs at %#llx CPL=%u\n", cbInstr, abInstr, GCPtrInstr, uCpl));
     7331        }
     7332        else
     7333            Log6(("hmR0SvmExitXcptUD: not in long mode at %04x:%llx\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    72697334    }
    72707335
    72717336    /* If the GIM #UD exception handler didn't succeed for some reason or wasn't needed, raise #UD. */
    7272     if (RT_FAILURE(rc))
     7337    if (RT_FAILURE(rcStrict))
    72737338    {
    72747339        hmR0SvmSetPendingXcptUD(pVCpu);
    7275         rc = VINF_SUCCESS;
     7340        rcStrict = VINF_SUCCESS;
    72767341    }
    72777342
    72787343    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
    7279     return rc;
     7344    return VBOXSTRICTRC_TODO(rcStrict);
    72807345}
    72817346
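
The #UD-exit filter added above only has to recognize three byte patterns before handing the instruction to IEM. A standalone restatement of that check (hypothetical helper, not VirtualBox code; it mirrors the hunk's cbMin and encoding tests but omits the read-error fallback):

    #include <stdbool.h>
    #include <stdint.h>

    /* Minimum bytes worth fetching: any CPL can hit sysenter/sysexit
       (2 bytes); ring 0 may additionally use rex.w sysexit (1 + 2 bytes). */
    static unsigned minInstrBytes(unsigned uCpl)
    {
        return uCpl != 0 ? 2 : 1 + 2;
    }

    static bool isEmulatableSysEnterExit(const uint8_t *pb, unsigned cb, unsigned uCpl)
    {
        if (cb >= 2 && pb[0] == 0x0f && pb[1] == 0x34)          /* sysenter */
            return true;
        if (uCpl == 0)
        {
            if (cb >= 2 && pb[0] == 0x0f && pb[1] == 0x35)      /* sysexit */
                return true;
            if (   cb >= 3 && (pb[0] & 0xf8) == 0x48            /* rex.w sysexit */
                && pb[1] == 0x0f && pb[2] == 0x35)
                return true;
        }
        return false;
    }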
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r85854 r86183  
    18421842
    18431843    /*
      1844     * Determine whether we need to intercept #UD in SVM mode for emulating
      1845     * Intel SYSENTER/SYSEXIT on AMD64, as these instructions result in #UD
      1846     * when executed in long mode on AMD CPUs.  This is only really applicable
      1847     * when non-default CPU profiles are in effect, i.e. the guest CPU vendor
      1848     * differs from the host's.
      1849     */
     1850    if (CPUMGetGuestCpuVendor(pVM) != CPUMGetHostCpuVendor(pVM))
     1851        switch (CPUMGetGuestCpuVendor(pVM))
     1852        {
     1853            case CPUMCPUVENDOR_INTEL:
     1854            case CPUMCPUVENDOR_VIA: /*?*/
     1855            case CPUMCPUVENDOR_SHANGHAI: /*?*/
     1856                switch (CPUMGetHostCpuVendor(pVM))
     1857                {
     1858                    case CPUMCPUVENDOR_AMD:
     1859                    case CPUMCPUVENDOR_HYGON:
     1860                        if (pVM->hm.s.fAllow64BitGuests)
     1861                        {
     1862                            LogRel(("HM: Intercepting #UD for emulating SYSENTER/SYSEXIT in long mode.\n"));
     1863                            for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     1864                                pVM->apCpusR3[idCpu]->hm.s.svm.fEmulateLongModeSysEnterExit = true;
     1865                        }
     1866                        break;
     1867                    default: break;
     1868                }
     1869            default: break;
     1870        }
     1871
     1872    /*
    18441873     * Call ring-0 to set up the VM.
    18451874     */
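
In effect, the new gate enables the workaround exactly when an Intel-like guest CPU profile runs on an AMD or Hygon host with 64-bit guests allowed. A condensed restatement (hypothetical helper, not VirtualBox code):

    #include <stdbool.h>

    typedef enum { VENDOR_INTEL, VENDOR_VIA, VENDOR_SHANGHAI,
                   VENDOR_AMD, VENDOR_HYGON, VENDOR_OTHER } CPUVENDOR;

    static bool needLongModeSysEnterExitEmulation(CPUVENDOR enmGuest, CPUVENDOR enmHost,
                                                  bool fAllow64BitGuests)
    {
        bool const fGuestIntelLike = enmGuest == VENDOR_INTEL
                                  || enmGuest == VENDOR_VIA       /* tentatively, per the '?' marks */
                                  || enmGuest == VENDOR_SHANGHAI;
        bool const fHostAmdLike    = enmHost == VENDOR_AMD || enmHost == VENDOR_HYGON;
        return fGuestIntelLike && fHostAmdLike && fAllow64BitGuests;
    }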
  • trunk/src/VBox/VMM/include/HMInternal.h

    r85854 r86183  
    894894    /** Whether \#UD needs to be intercepted (required by certain GIM providers). */
    895895    bool                        fGIMTrapXcptUD;
    896     /** Whether \#GP needs to be intercept for mesa driver workaround. */
     896    /** Whether \#GP needs to be intercepted for mesa driver workaround. */
    897897    bool                        fTrapXcptGpForLovelyMesaDrv;
    898898    /** Whether we're executing a single instruction. */
     
    10151015             *  we should check if the VTPR changed on every VM-exit. */
    10161016            bool                        fSyncVTpr;
    1017             uint8_t                     au8Alignment0[7];
      1017            /** Whether to emulate long mode support for sysenter/sysexit like Intel
      1018             *  CPUs do.  This means intercepting \#UD to emulate the instructions in
      1019             *  long mode and intercepting SYSENTER MSR reads and writes, in order to
      1020             *  preserve the upper 32 bits written to them (AMD ignores and discards). */
     1021            bool                        fEmulateLongModeSysEnterExit;
     1022            uint8_t                     au8Alignment0[6];
    10181023
    10191024            /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
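
Note that the new bool is carved out of the existing alignment array, so the structure's size and the offsets of the members after it are unchanged, which matters for a structure shared between ring-0, ring-3 and assembly. A compile-time illustration of the invariant (hypothetical stand-in structs, not the real HMCPU layout):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct OLDLAYOUT
    {
        bool     fSyncVTpr;
        uint8_t  au8Alignment0[7];
        uint64_t u64HostTscAux;
    } OLDLAYOUT;

    typedef struct NEWLAYOUT
    {
        bool     fSyncVTpr;
        bool     fEmulateLongModeSysEnterExit;  /* new member takes one padding byte */
        uint8_t  au8Alignment0[6];              /* array shrinks to compensate */
        uint64_t u64HostTscAux;
    } NEWLAYOUT;

    /* C11: both layouts agree in size and in the offsets of later members. */
    _Static_assert(sizeof(OLDLAYOUT) == sizeof(NEWLAYOUT), "size must not change");
    _Static_assert(offsetof(OLDLAYOUT, u64HostTscAux) == offsetof(NEWLAYOUT, u64HostTscAux),
                   "offsets of following members must not change");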