VirtualBox

Changeset 70700 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Jan 23, 2018 10:34:56 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 120441
Message: VMM/HM: Nested Hw.virt: SVM fixes.

File: 1 edited

Legend: in the diff below, lines prefixed with '+' were added and lines prefixed with '-' were removed; all other lines are unchanged context.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    (r70465 → r70700)

@@ -40 +40 @@
 #ifdef DEBUG_ramshankar
 # define HMSVM_SYNC_FULL_GUEST_STATE
+# define HMSVM_SYNC_FULL_NESTED_GUEST_STATE
 # define HMSVM_ALWAYS_TRAP_ALL_XCPTS
 # define HMSVM_ALWAYS_TRAP_PF
     
@@ -135 +136 @@
 #else
 # define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           do { NOREF((a_pCtx)); } while (0)
+#endif
+
+/** Validate segment descriptor granularity bit. */
+#ifdef VBOX_STRICT
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
+    AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
+              || (   pMixedCtx->reg.Attr.n.u1Granularity \
+                  ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
+                  :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
+              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
+              pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
+#else
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg)              do { } while (0)
+#endif
 #endif
 
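The new HMSVM_ASSERT_SEG_GRANULARITY macro checks the standard x86 segment-descriptor granularity rule: for a present segment, a 4 KiB-granular (G=1) limit always has its low 12 bits set once expanded, while a byte-granular (G=0) limit must fit in the descriptor's 20-bit limit field. A minimal standalone sketch of that predicate, for illustration only (the function name is made up and is not part of the changeset):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative predicate mirroring the invariant behind HMSVM_ASSERT_SEG_GRANULARITY. */
static bool IsSegLimitConsistent(bool fPresent, bool fGranularity, uint32_t u32Limit)
{
    if (!fPresent)
        return true;                         /* Not-present/unusable segments are skipped. */
    if (fGranularity)
        return (u32Limit & 0xfff) == 0xfff;  /* 4 KiB granularity: low 12 bits must be set. */
    return u32Limit <= UINT32_C(0xfffff);    /* Byte granularity: must fit in 20 bits. */
}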
     
@@ -380 +394 @@
 #endif
 
+
+#ifdef VBOX_STRICT
+# define HMSVM_LOG_CS          RT_BIT_32(0)
+# define HMSVM_LOG_SS          RT_BIT_32(1)
+# define HMSVM_LOG_FS          RT_BIT_32(2)
+# define HMSVM_LOG_GS          RT_BIT_32(3)
+# define HMSVM_LOG_LBR         RT_BIT_32(4)
+# define HMSVM_LOG_ALL         (  HMSVM_LOG_CS \
+                                | HMSVM_LOG_SS \
+                                | HMSVM_LOG_FS \
+                                | HMSVM_LOG_GS \
+                                | HMSVM_LOG_LBR)
+
+/**
+ * Dumps CPU state and additional info. to the logger for diagnostics.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcb       Pointer to the VM control block.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pszPrefix   Log prefix.
+ * @param   fFlags      Log flags, see HMSVM_LOG_XXX.
+ * @param   uVerbose    The verbosity level, currently unused.
+ */
+static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,
+                            uint8_t uVerbose)
+{
+    RT_NOREF(uVerbose);
+
+    Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX32 cr0=%#RX32 cr3=%#RX32 cr4=%#RX32\n", pszPrefix, pCtx->cs.Sel, pCtx->rip,
+          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
+    Log4(("%s: rsp=%#RX64 rbp=%#RX64 rdi=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp, pCtx->rdi));
+    if (fFlags & HMSVM_LOG_CS)
+    {
+        Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
+              pCtx->cs.u32Limit, pCtx->cs.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_SS)
+    {
+        Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
+              pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_FS)
+    {
+        Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
+              pCtx->fs.u32Limit, pCtx->fs.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_GS)
+    {
+        Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
+              pCtx->gs.u32Limit, pCtx->gs.Attr.u));
+    }
+
+    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
+    if (fFlags & HMSVM_LOG_LBR)
+    {
+        Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
+              pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
+    }
+}
+#endif
+
+
 /**
  * Sets up and activates AMD-V on the current CPU.
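As a usage illustration (not taken verbatim from the changeset; the "MyCaller" prefix is hypothetical), a strict-build caller that wants the full segment dump but not the last-branch-record fields masks HMSVM_LOG_LBR out of HMSVM_LOG_ALL, mirroring the call the nested-guest save path makes later in this changeset:

#ifdef VBOX_STRICT
    /* Dump CS/SS/FS/GS but skip the LBR fields; "MyCaller" is a made-up log prefix. */
    hmR0SvmLogState(pVCpu, pVmcb, pCtx, "MyCaller", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
#endif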
     
@@ -1499 +1575 @@
 
 /**
- * Loads the guest segment registers into the VMCB.
+ * Loads the guest (or nested-guest) segment registers into the VMCB.
  *
  * @returns VBox status code.
     
@@ -1560 +1636 @@
 
 /**
- * Loads the guest MSRs into the VMCB.
+ * Loads the guest (or nested-guest) MSRs into the VMCB.
  *
  * @param   pVCpu       The cross context virtual CPU structure.
     
@@ -1590 +1666 @@
     if (CPUMIsGuestInLongModeEx(pCtx))
     {
+        /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */
         pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
         pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
     }
     else
     
@@ -2188 +2266 @@
     Log4(("hmR0SvmLoadGuestState: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->ss.Sel, pCtx->ss.u64Base,
           pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+    Log4(("hmR0SvmLoadGuestState: FS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->fs.Sel, pCtx->fs.u64Base,
+          pCtx->fs.u32Limit, pCtx->fs.Attr.u));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
     
@@ -2320 +2400 @@
     AssertRCReturn(rc, rc);
 
+    /*
+     * We need to load the entire state (including FS, GS etc.) as we could be continuing
+     * to execute the nested-guest at any point (not just immediately after VMRUN) and thus
+     * the VMCB can possibly be out-of-sync with the actual nested-guest state if it was
+     * executed in IEM.
+     */
     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
     
@@ -2356 +2442 @@
                ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 
-    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 "
-          "ESP=%#RX32 EBP=%#RX32 rc=%d\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3,
-          pVmcbNstGst->guest.u64CR3, pCtx->cr4, pCtx->esp, pCtx->ebp, rc));
-    Log4(("hmR0SvmLoadGuestStateNested: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->ss.Sel, pCtx->ss.u64Base,
-          pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+#ifdef VBOX_STRICT
+    hmR0SvmLogState(pVCpu, pVmcbNstGst, pCtx, "hmR0SvmLoadGuestStateNested", HMSVM_LOG_ALL, 0 /* uVerbose */);
+#endif
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
-
     return rc;
 }
     
@@ -2413 +2496 @@
 
 /**
- * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU context.
+ * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
+ * context.
  *
  * Currently there is no residual state left in the CPU that is not updated in the
     
@@ -2495 +2579 @@
     }
 
-#ifdef VBOX_STRICT
-# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
-    AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
-              || (   pMixedCtx->reg.Attr.n.u1Granularity \
-                  ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
-                  :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
-              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
-              pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
-
     HMSVM_ASSERT_SEG_GRANULARITY(cs);
     HMSVM_ASSERT_SEG_GRANULARITY(ss);
     
@@ -2510 +2585 @@
     HMSVM_ASSERT_SEG_GRANULARITY(fs);
     HMSVM_ASSERT_SEG_GRANULARITY(gs);
-
-# undef HMSVM_ASSERT_SEL_GRANULARITY
-#endif
 
     /*
     
@@ -2545 +2617 @@
 
     /*
-     * Guest Descriptor-Table registers.
+     * Guest Descriptor-Table registers (GDTR, IDTR, LDTR).
      */
     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr);
     
@@ -2579 +2651 @@
     }
 
+#ifdef VBOX_STRICT
     if (CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
-    {
-        Log4(("hmR0SvmSaveGuestState: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32 ESP=%#RX32 EBP=%#RX32\n",
-              pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->eflags.u, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
-              pMixedCtx->esp, pMixedCtx->ebp));
-        Log4(("hmR0SvmSaveGuestState: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pMixedCtx->ss.Sel, pMixedCtx->ss.u64Base,
-              pMixedCtx->ss.u32Limit, pMixedCtx->ss.Attr.u));
-        Log4(("hmR0SvmSaveGuestState: DBGCTL BR_FROM=%#RX64 BR_TO=%#RX64 XcptFrom=%#RX64 XcptTo=%#RX64\n",
-              pVmcb->guest.u64BR_FROM, pVmcb->guest.u64BR_TO,pVmcb->guest.u64LASTEXCPFROM, pVmcb->guest.u64LASTEXCPTO));
-    }
+        hmR0SvmLogState(pVCpu, pVmcb, pMixedCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
+#endif
 }
 
     
@@ -3393 +3459 @@
          * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
          */
-        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-            && !fIntShadow
-            && !pVCpu->hm.s.fSingleInstruction
-            && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
-        {
-            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
+        if (!fIntShadow)
+        {
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+                && !pVCpu->hm.s.fSingleInstruction
+                && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
             {
-                Log4(("Intercepting external interrupt -> #VMEXIT\n"));
-                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+                if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
+                {
+                    Log4(("Intercepting external interrupt -> #VMEXIT\n"));
+                    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+                }
+
+                uint8_t u8Interrupt;
+                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+                if (RT_SUCCESS(rc))
+                {
+                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                    SVMEVENT Event;
+                    Event.u = 0;
+                    Event.n.u1Valid  = 1;
+                    Event.n.u8Vector = u8Interrupt;
+                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                }
+                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+                {
+                    /*
+                     * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                     * updated eventually when the TPR is written by the guest.
+                     */
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
+                }
+                else
+                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
             }
 
-            uint8_t u8Interrupt;
-            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-            if (RT_SUCCESS(rc))
+            /*
+             * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
+             * interrupt injection. The virtual interrupt injection itself, if any, will be done
+             * by the physical CPU.
+             */
+#if 0
+            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+                && (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
+                && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
             {
-                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-                SVMEVENT Event;
-                Event.u = 0;
-                Event.n.u1Valid  = 1;
-                Event.n.u8Vector = u8Interrupt;
-                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
             }
-            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-            {
-                /*
-                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-                 * updated eventually when the TPR is written by the guest.
-                 */
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-            }
-            else
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
-        }
-
-        /*
-         * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
-         * interrupt injection. The virtual interrupt injection itself, if any, will be done
-         * by the physical CPU.
-         */
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-            && (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
-            && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
-        {
-            Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
-            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
+#endif
         }
     }
     
@@ -3483 +3553 @@
         PSVMVMCB pVmcb        = pVCpu->hm.s.svm.pVmcb;
 
-        Log4Func(("fGif=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fGif, fBlockInt, fIntShadow,
+        Log4Func(("fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fBlockInt, fIntShadow,
                   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
 
     
@@ -3563 +3633 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
-    bool const fIsNestedGuest = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
-    bool const fIntShadow     = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-    bool const fBlockInt      = !fIsNestedGuest ? !(pCtx->eflags.u32 & X86_EFL_IF) : CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
+#ifdef VBOX_STRICT
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fGif       = pCtx->hwvirt.svm.fGif;
+    bool       fAllowInt  = fGif;
+    if (fGif)
+    {
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            fAllowInt = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
+        else
+            fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
+    }
+#endif
 
     if (pVCpu->hm.s.Event.fPending)
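The strict-build block above distills external-interrupt deliverability to: the global interrupt flag (GIF) must be set, and then either the nested-guest physical-interrupt rules or the guest's EFLAGS.IF apply. A rough standalone restatement, for illustration only (the function name, parameter names, and the local EFLAGS constant are stand-ins, not changeset code):

#include <stdbool.h>
#include <stdint.h>

#define MY_X86_EFL_IF UINT32_C(0x200)  /* EFLAGS.IF is bit 9; stands in for X86_EFL_IF. */

/* Illustrative restatement of the fAllowInt computation above. */
static bool CanTakeExternalInterrupt(bool fGif, bool fInNestedGuestMode, bool fNstGstPhysIntrOk, uint32_t fEFlags)
{
    if (!fGif)
        return false;              /* The global interrupt flag gates everything. */
    if (fInNestedGuestMode)
        return fNstGstPhysIntrOk;  /* Nested-guest: result of the physical-interrupt checks. */
    return (fEFlags & MY_X86_EFL_IF) != 0;  /* Plain guest: EFLAGS.IF decides. */
}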
     
@@ -3578 +3657 @@
         if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
         {
-            Assert(!fBlockInt);
+            Assert(fAllowInt);
             Assert(!fIntShadow);
         }
         else if (Event.n.u3Type == SVM_EVENT_NMI)
+        {
+            Assert(fGif);
             Assert(!fIntShadow);
-        NOREF(fBlockInt);
+        }
 
         /*
     
@@ -3901 +3982 @@
         if (rcStrict != VINF_SUCCESS)
             return VBOXSTRICTRC_VAL(rcStrict);
+        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            return VINF_SVM_VMEXIT;
     }
 
     
@@ -3914 +3997 @@
         return VINF_EM_RAW_INJECT_TRPM_EVENT;
     }
+
+#ifdef HMSVM_SYNC_FULL_NESTED_GUEST_STATE
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+#endif
 
     /*
     
@@ -4752 +4839 @@
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
         rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
-        if (rc != VINF_SUCCESS)
+        if (   rc != VINF_SUCCESS
+            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        {
             break;
+        }
 
         /*
     
@@ -5388 +5478 @@
         case SVM_EXIT_WRITE_CR4:
         case SVM_EXIT_WRITE_CR8:
+        {
+            uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
+            Log4(("hmR0SvmHandleExitNested: Write CR%u\n", uCr));
             return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+        }
 
         case SVM_EXIT_PAUSE:
     
@@ -6524 +6618 @@
                 HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
             }
-            else if (pCtx->ecx == MSR_K6_EFER)
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
-            else if (pCtx->ecx == MSR_IA32_TSC)
-                pSvmTransient->fUpdateTscOffsetting = true;
+            else
+            {
+                switch (pCtx->ecx)
+                {
+                    case MSR_K6_EFER:           HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);         break;
+                    case MSR_IA32_TSC:          pSvmTransient->fUpdateTscOffsetting = true;             break;
+                    case MSR_K8_FS_BASE:
+                    case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
+                    case MSR_IA32_SYSENTER_CS:  HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
+                    case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
+                    case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
+                }
+            }
         }
     }
     
@@ -7482 +7585 @@
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
-    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     uint8_t const  uVector  = pVmcb->ctrl.u64ExitCode - SVM_EXIT_EXCEPTION_0;
     uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;
     
@@ -7488 +7591 @@
     Assert(uVector <= X86_XCPT_LAST);
     Log4(("hmR0SvmExitXcptGeneric: uVector=%#x uErrCode=%u\n", uVector, uErrCode));
-
 
     SVMEVENT Event;
     
@@ -7607 +7709 @@
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
-
     return VBOXSTRICTRC_VAL(rcStrict);
 }