VirtualBox Changeset r69926

Timestamp: Dec 5, 2017 9:48:57 AM
Author: vboxsync
Message: VMM/HMSVMR0: Nested Hw.virt: Fixes
File: 1 edited

Legend: unchanged lines have no prefix; '+' marks added lines; '-' marks removed lines.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69916)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r69926)

@@ -78 +78 @@
     } while (0)

+/**
+ * Updates interrupt shadow for the current RIP.
+ */
+#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
+    do { \
+        /* Update interrupt shadow. */ \
+        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
+            && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
+    } while (0)
+
 /** Macro for upgrading a @a a_rc to VINF_EM_DBG_STEPPED after emulating an
  * instruction that exited. */

@@ -96 +107 @@
                                                         ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                         pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
+
+/** Assert that we're not executing a nested-guest. */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
+#else
+# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       do { RT_NOREF((a_pCtx)); } while (0)
+#endif
+
+/** Assert that we're executing a nested-guest. */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
+#else
+# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           do { RT_NOREF((a_pCtx)); } while (0)
+#endif

 /**

@@ -314 +339 @@
 static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
 static FNSVMEXITHANDLER hmR0SvmExitVmrun;
-static FNSVMEXITHANDLER hmR0SvmNestedExitIret;
 static FNSVMEXITHANDLER hmR0SvmNestedExitXcptDB;
 static FNSVMEXITHANDLER hmR0SvmNestedExitXcptBP;

@@ -838 +862 @@

 /**
+ * Gets a pointer to the currently active guest or nested-guest VMCB.
+ *
+ * @returns Pointer to the current context VMCB.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the guest-CPU context.
+ */
+DECLINLINE(PSVMVMCB) hmR0SvmGetCurrentVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        return pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+#else
+    RT_NOREF(pCtx);
+#endif
+    return pVCpu->hm.s.svm.pVmcb;
+}
+
+
+/**
  * Invalidates a guest page by guest virtual address.
  *

@@ -857 +900 @@
         Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));

-        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+        PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);

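The hmR0SvmGetCurrentVmcb() helper added above is the centerpiece of this changeset: instead of dereferencing pVCpu->hm.s.svm.pVmcb directly, code now asks for whichever VMCB is actually being executed, so nested-guest exits read and modify the nested-guest VMCB (pCtx->hwvirt.svm.CTX_SUFF(pVmcb)). A minimal sketch of the resulting call-site pattern follows; the handler name hmR0SvmExitExample is hypothetical and merely stands in for the many real exit handlers converted in the hunks below.

    /* Hypothetical exit handler illustrating the VMCB-lookup pattern used throughout this change. */
    HMSVM_EXIT_DECL hmR0SvmExitExample(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    {
        /* Returns the nested-guest VMCB when CPUMIsGuestInSvmNestedHwVirtMode(pCtx) is true,
           otherwise the ordinary guest VMCB (pVCpu->hm.s.svm.pVmcb). */
        PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);

        /* Exit-specific work would inspect pVmcb->ctrl and pVmcb->guest here. */
        RT_NOREF(pSvmTransient);
        return VINF_SUCCESS;
    }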
     
@@ -1748 +1792 @@
         pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;

-        /*
-         * CR3, CR4 reads and writes are intercepted as we modify them before
-         * hardware-assisted SVM execution. In addition, PGM needs to be up to date
-         * on paging mode changes in the nested-guest.
-         *
-         * CR0 writes are intercepted in case of paging mode changes. CR0 reads are not
-         * intercepted as we currently don't modify CR0 while executing the nested-guest.
-         */
-        pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(4) | RT_BIT(3);
-        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(4) | RT_BIT(3) | RT_BIT(0);
+        /* Always intercept CR0, CR4 reads and writes as we alter them. */
+        pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(0) | RT_BIT(4);
+        pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(0) | RT_BIT(4);
+
+        /* Always intercept CR3 reads and writes without nested-paging as we load shadow page tables. */
+        if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
+        {
+            pVmcbNstGst->ctrl.u16InterceptRdCRx |= RT_BIT(3);
+            pVmcbNstGst->ctrl.u16InterceptWrCRx |= RT_BIT(3);
+        }

         /** @todo Figure out debugging with nested-guests, till then just intercept
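In the VMCB's CRx intercept bitmaps, bit n controls interception of CRn, which is why the hunk above ORs in RT_BIT(0) and RT_BIT(4) unconditionally (the VMM alters CR0 and CR4) and RT_BIT(3) only when nested paging is disabled (CR3 then points at shadow page tables). A compressed illustration of the same logic, reusing the pVmcbNstGst and fNestedPaging fields from the hunk above; it is not additional code from the changeset:

    /* Illustration only: bit n of u16InterceptRdCRx/u16InterceptWrCRx intercepts CRn. */
    uint16_t fCrxIntercepts = RT_BIT(0) | RT_BIT(4);         /* CR0 and CR4: always intercepted. */
    if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
        fCrxIntercepts |= RT_BIT(3);                         /* CR3: only while using shadow paging. */
    pVmcbNstGst->ctrl.u16InterceptRdCRx |= fCrxIntercepts;
    pVmcbNstGst->ctrl.u16InterceptWrCRx |= fCrxIntercepts;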
     
@@ -1767 +1811 @@
         pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl
                                             |  HMSVM_MANDATORY_NESTED_GUEST_CTRL_INTERCEPTS;
+
         /*
          * Remove control intercepts that we don't need while executing the nested-guest.

@@ -1949 +1994 @@
 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);

@@ -2241 +2288 @@
     pMixedCtx->cr2        = pVmcb->guest.u64CR2;

-#ifdef VBOX_WITH_NESTED_GUEST
-    /*
-     * The nested hypervisor might not be intercepting these control registers,
-     */
-    if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
-    {
-        pMixedCtx->cr4    = pVmcb->guest.u64CR4;
-        pMixedCtx->cr0    = pVmcb->guest.u64CR0;
-    }
-#endif
-
     /*
      * Guest MSRs.

@@ -2398 +2434 @@
     if (CPUMIsHyperDebugStateActive(pVCpu))
     {
-        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; /** @todo nested-guest. */
         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);

@@ -3020 +3056 @@
     {
         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

         Log4(("Setting IRET intercept\n"));

@@ -3196 +3232 @@
 #endif

+
 /**
  * Evaluates the event to be delivered to the guest and sets it as the pending

@@ -3202 +3239 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks Don't use this function when we are actively executing a
+ *          nested-guest, use hmR0SvmEvaluatePendingEventNested instead.
  */
 static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     Assert(!pVCpu->hm.s.Event.fPending);

 #ifdef VBOX_WITH_NESTED_HWVIRT
-    bool const fGif       = pCtx->hwvirt.svm.fGif;
+    bool const fGif = pCtx->hwvirt.svm.fGif;
 #else
-    bool const fGif       = true;
+    bool const fGif = true;
 #endif
     Log4Func(("fGif=%RTbool\n", fGif));

@@ -3372 +3413 @@
     NOREF(pCtx);
     HMSVM_ASSERT_PREEMPT_SAFE();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;

@@ -3738 +3780 @@
 {
     HMSVM_ASSERT_PREEMPT_SAFE();
-    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
-    /* IEM only for executing nested guest, we shouldn't get here. */
-    /** @todo Make this into an assertion since HMR3CanExecuteGuest already checks
-     *        for it? */
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-    {
-        Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
-        return VINF_EM_RESCHEDULE_REM;
-    }
-#endif
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

     /* Check force flag actions that might require us to go back to ring-3. */

@@ -3871 +3902 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);

     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

@@ -3982 +4014 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

@@ -4188 +4221 @@
                                                                    guest-CPU context. */

+    /** @todo This could later be optimized. Not now. */
     HMSvmNstGstVmExitNotify(pVCpu, pMixedCtx);                  /* Restore modified VMCB fields for now, see @bugref{7243#c52} .*/
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);                  /* Ensure we re-modify the fields before next reentry. */
 }
 #endif

@@ -4215 +4250 @@
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */

-    PSVMVMCB pVmcb =pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */

@@ -4466 +4501 @@
 static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
 {
-    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pcLoops);
     Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);

@@ -4514 +4549 @@
         HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
-        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
+        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pCtx->hwvirt.svm.CTX_SUFF(pVmcb));
         rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);

@@ -4622 +4657 @@
 static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);

@@ -4950 +4986 @@
                     if (HM_SVM_IS_CTRL_INTERCEPT_SET(pCtx, SVM_CTRL_INTERCEPT_IRET))
                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
-                    return hmR0SvmNestedExitIret(pVCpu, pCtx, pSvmTransient);
+                    return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
                 }

@@ -5261 +5297 @@
                     /** @todo r=ramshankar; We should be doing
                      *        HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY here! */
-
                     PSVMVMCB pVmcb   = pVCpu->hm.s.svm.pVmcb;
                     SVMEVENT Event;

@@ -5524 +5559 @@
 {
     int rc = VINF_SUCCESS;
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);

     Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",

@@ -5793 +5828 @@

 /**
- * Updates interrupt shadow for the current RIP.
- */
-#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
-    do { \
-        /* Update interrupt shadow. */ \
-        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) \
-            && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) \
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); \
-    } while (0)
+ * Returns whether decode-assist feature is supported.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(bool) hmR0SvmSupportsDecodeAssist(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        return    (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
+               &&  pVM->cpum.ro.GuestFeatures.fSvmDecodeAssist;
+    }
+#else
+    RT_NOREF(pCtx);
+#endif
+    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST);
+}
+
+
+/**
+ * Returns whether NRIP_SAVE feature is supported.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(bool) hmR0SvmSupportsNextRipSave(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        return    (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+               &&  pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
+    }
+#else
+    RT_NOREF(pCtx);
+#endif
+    return RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
+}

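Both new predicates share one pattern: while a nested-guest is executing, a hardware feature is only used if the host CPU reports it (pVM->hm.s.svm.u32Features) and it is also exposed to the nested hypervisor (pVM->cpum.ro.GuestFeatures); outside nested-guest mode only the host capability matters. A hypothetical caller, shown only to illustrate how these predicates replace the raw u32Features tests removed in the following hunks:

    /* Hypothetical caller: use the hardware-saved next RIP when available, else fall back. */
    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
    if (fSupportsNextRipSave)
    {
        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
        pCtx->rip = pVmcb->ctrl.u64NextRIP;     /* advance past the intercepted instruction */
    }
    else
    {
        /* Without NRIP_SAVE the instruction length must be determined by software decoding. */
    }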
     
@@ -5819 +5886 @@
 DECLINLINE(void) hmR0SvmAdvanceRipHwAssist(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t cb)
 {
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
-    {
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         Assert(pVmcb->ctrl.u64NextRIP);
         AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */

@@ -5845 +5913 @@
 {
     Assert(cbLikely <= 15);   /* See Intel spec. 2.3.11 "AVX Instruction Length" */
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
-    {
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
         Assert(cbInstr == cbLikely);

@@ -6038 +6107 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);

-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         uint8_t const cbInstr   = pVmcb->ctrl.u64NextRIP - pCtx->rip;
         RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;

@@ -6160 +6231 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]);

-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    bool const fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
         if (fMovCRx)

@@ -6199 +6271 @@

     VBOXSTRICTRC rcStrict = VERR_SVM_IPE_5;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    bool fDecodedInstr = false;
-    if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSIST)
-    {
-        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
-        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PVM          pVM = pVCpu->CTX_SUFF(pVM);
+    bool         fDecodedInstr = false;
+    bool const   fSupportsDecodeAssist = hmR0SvmSupportsDecodeAssist(pVCpu, pCtx);
+    bool const   fSupportsNextRipSave  = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+    if (   fSupportsDecodeAssist
+        && fSupportsNextRipSave)
+    {
+        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
         bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
         if (fMovCRx)

@@ -6267 +6341 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     PVM      pVM   = pVCpu->CTX_SUFF(pVM);

@@ -6293 +6367 @@
         }

-        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+        bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+        if (fSupportsNextRipSave)
         {
             rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));

@@ -6341 +6416 @@
         Log4(("MSR Read: idMsr=%#RX32\n", pCtx->ecx));

-        if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+        bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+        if (fSupportsNextRipSave)
         {
             rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));

@@ -6366 +6442 @@
     }

-    /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */
+    /* RIP has been updated by EMInterpret[Rd|Wr]msr() or EMInterpretInstruction(). */
     return rc;
 }

@@ -6488 +6564 @@

     PVM      pVM   = pVCpu->CTX_SUFF(pVM);
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);

     /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */

@@ -6530 +6606 @@
                        only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
                        2384 Opterons when only checking NRIP. */
-                    if (   (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
+                    bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
+                    if (   fSupportsNextRipSave
                         && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
                     {

@@ -6699 +6776 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(pVM->hm.s.fNestedPaging);

@@ -6808 +6887 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
+
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;  /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
     pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;

@@ -6908 +6988 @@

     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     hmR0SvmClearIretIntercept(pVmcb);

@@ -6923 +7003 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);

     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

@@ -7285 +7366 @@

     /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
-    PSVMVMCB       pVmcb         = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB       pVmcb         = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     uint32_t       u32ErrCode    = pVmcb->ctrl.u64ExitInfo1;
     uint64_t const uFaultAddress = pVmcb->ctrl.u64ExitInfo2;

@@ -7451 +7532 @@

 /**
- * Nested-guest \#VMEXIT handler for IRET (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
- */
-HMSVM_EXIT_DECL hmR0SvmNestedExitIret(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
-{
-    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-
-    /* Clear NMI blocking. */
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-
-    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    hmR0SvmClearIretIntercept(pVmcbNstGst);
-
-    /* Deliver the pending NMI via hmR0SvmEvaluatePendingEventNested() and resume guest execution. */
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Nested-guest \#VMEXIT handler for debug exceptions (SVM_EXIT_EXCEPTION_1).
  * Unconditional \#VMEXIT.