VirtualBox

Changeset 67945 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Jul 13, 2017 9:49:32 AM (7 years ago)
Author: vboxsync
Message: VMM/IEM: Nested Hw.virt: Fixes

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
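
A note on the pattern used throughout this changeset: each SVM helper that previously bailed out of non-ring-3 contexts with an early "#ifndef IN_RING3 ... #endif" block (leaving the ring-3 emulation dangling after an unconditional return) is restructured into an "#ifndef IN_RING3 / #else / #endif" split, with RT_NOREF/RT_NOREF2 marking the parameters as deliberately unused on the non-ring-3 path; preprocessor conditionals that end up nested inside the new #else branch pick up the customary "# if" indentation. A minimal sketch of the shape, assuming a hypothetical helper name and an illustrative body (only the RT_NOREF2 call, the status codes and iemRegAddToRipAndClearRF are taken from the hunks below):

    /* Sketch only: iemSvmExampleInsn and its body are illustrative, not part
     * of the changeset; the #ifndef/#else/#endif shape and the RT_NOREF2
     * usage mirror the hunks below. */
    static VBOXSTRICTRC iemSvmExampleInsn(PVMCPU pVCpu, uint8_t cbInstr)
    {
    #ifndef IN_RING3
        /* Raw-mode / ring-0: mark the parameters as intentionally unused and
           defer the instruction to ring-3. */
        RT_NOREF2(pVCpu, cbInstr);
        return VINF_EM_RESCHEDULE_REM;
    #else
        /* Ring-3: the actual emulation lives entirely inside the #else branch,
           so no unreachable code follows the early return above. */
        LogFlow(("iemSvmExampleInsn\n"));
        /* ... emulate the instruction ... */
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    #endif
    }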

    r67925 r67945  
    100100{
    101101#ifndef IN_RING3
     102    RT_NOREF(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    102103    AssertMsgFailed(("iemSvmVmexit: Bad context\n"));
    103104    return VERR_INTERNAL_ERROR_5;
    104 #endif
    105 
     105#else
    106106    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
    107107        || uExitCode == SVM_EXIT_INVALID)
     
    216216         */
    217217        memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
     218        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    218219
    219220        if (RT_SUCCESS(rcStrict))
     
    285286                     VBOXSTRICTRC_VAL(rcStrict)));
    286287
    287         Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    288288        return VERR_SVM_VMEXIT_FAILED;
    289289    }
     
    293293    AssertMsgFailed(("iemSvmVmexit: Unexpected SVM-exit failure uExitCode=%#RX64\n", uExitCode));
    294294    return VERR_SVM_IPE_5;
     295#endif
    295296}
    296297
     
    314315{
    315316#ifndef IN_RING3
     317    RT_NOREF(pVCpu, pCtx, cbInstr, GCPhysVmcb);
    316318    return VINF_EM_RESCHEDULE_REM;
    317 #endif
    318 
    319     Assert(pVCpu);
    320     Assert(pCtx);
    321 
     319#else
    322320    PVM pVM = pVCpu->CTX_SUFF(pVM);
    323321    LogFlow(("iemSvmVmrun\n"));
     
    331329     * Read the guest VMCB state.
    332330     */
    333     SVMVMCBSTATESAVE VmcbNstGst;
    334     int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), sizeof(SVMVMCBSTATESAVE));
     331    int rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
    335332    if (RT_SUCCESS(rc))
    336333    {
     334        PSVMVMCBCTRL      pVmcbCtrl   = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
     335        PSVMVMCBSTATESAVE pVmcbNstGst = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->guest;
     336
    337337        /*
    338338         * Save the host state.
     
    355355
    356356        /*
    357          * Read the guest VMCB controls.
    358          */
    359         PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
    360         rc = PGMPhysSimpleReadGCPhys(pVM, pVmcbCtrl, GCPhysVmcb, sizeof(*pVmcbCtrl));
    361         if (RT_SUCCESS(rc))
    362         {
    363             /*
    364              * Validate guest-state and controls.
    365              */
    366             /* VMRUN must always be intercepted. */
    367             if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
     357         * Validate guest-state and controls.
     358         */
     359        /* VMRUN must always be intercepted. */
     360        if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
     361        {
     362            Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
     363            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     364        }
     365
     366        /* Nested paging. */
     367        if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
     368            && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
     369        {
     370            Log(("iemSvmVmrun: Nested paging not supported -> #VMEXIT\n"));
     371            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     372        }
     373
     374        /* AVIC. */
     375        if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
     376            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
     377        {
     378            Log(("iemSvmVmrun: AVIC not supported -> #VMEXIT\n"));
     379            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     380        }
     381
     382        /* Last branch record (LBR) virtualization. */
     383        if (    (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
     384            && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
     385        {
     386            Log(("iemSvmVmrun: LBR virtualization not supported -> #VMEXIT\n"));
     387            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     388        }
     389
     390        /* Guest ASID. */
     391        if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
     392        {
     393            Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
     394            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     395        }
     396
     397        /* IO permission bitmap. */
     398        RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
     399        if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
     400            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
     401            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
     402            || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
     403        {
     404            Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
     405            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     406        }
     407
     408        /* MSR permission bitmap. */
     409        RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
     410        if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
     411            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
     412            || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
     413        {
     414            Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
     415            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     416        }
     417
     418        /* CR0. */
     419        if (   !(pVmcbNstGst->u64CR0 & X86_CR0_CD)
     420            &&  (pVmcbNstGst->u64CR0 & X86_CR0_NW))
     421        {
     422            Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
     423            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     424        }
     425        if (pVmcbNstGst->u64CR0 >> 32)
     426        {
     427            Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0));
     428            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     429        }
     430        /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
     431
     432        /* DR6 and DR7. */
     433        if (   pVmcbNstGst->u64DR6 >> 32
     434            || pVmcbNstGst->u64DR7 >> 32)
     435        {
     436            Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6,
     437                 pVmcbNstGst->u64DR6));
     438            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     439        }
     440
     441        /** @todo gPAT MSR validation? */
     442
     443        /*
     444         * Copy the IO permission bitmap into the cache.
     445         */
     446        Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));
     447        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
     448                                     SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
     449        if (RT_FAILURE(rc))
     450        {
     451            Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
     452            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     453        }
     454
     455        /*
     456         * Copy the MSR permission bitmap into the cache.
     457         */
     458        Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
     459        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
     460                                     SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
     461        if (RT_FAILURE(rc))
     462        {
     463            Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
     464            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     465        }
     466
     467        /*
     468         * Copy segments from nested-guest VMCB state to the guest-CPU state.
     469         *
     470         * We do this here as we need to use the CS attributes and it's easier this way
     471         * then using the VMCB format selectors. It doesn't really matter where we copy
     472         * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
     473         */
     474        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, ES, es);
     475        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, CS, cs);
     476        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, SS, ss);
     477        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGst, DS, ds);
     478
     479        /** @todo Segment attribute overrides by VMRUN. */
     480
     481        /*
     482         * CPL adjustments and overrides.
     483         *
     484         * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
     485         * We shall thus adjust both CS.DPL and SS.DPL here.
     486         */
     487        pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;
     488        if (CPUMIsGuestInV86ModeEx(pCtx))
     489            pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;
     490        if (CPUMIsGuestInRealModeEx(pCtx))
     491            pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;
     492
     493        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
     494
     495        /*
     496         * Continue validating guest-state and controls.
     497         */
     498        /* EFER, CR0 and CR4. */
     499        uint64_t uValidEfer;
     500        rc = CPUMQueryValidatedGuestEfer(pVM, pVmcbNstGst->u64CR0, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
     501        if (RT_FAILURE(rc))
     502        {
     503            Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER));
     504            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     505        }
     506        bool const fSvm                     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
     507        bool const fLongModeSupported       = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
     508        bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
     509        bool const fPaging                  = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PG);
     510        bool const fPae                     = RT_BOOL(pVmcbNstGst->u64CR4 & X86_CR4_PAE);
     511        bool const fProtMode                = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE);
     512        bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
     513        bool const fLongModeConformCS       = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;
     514        /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
     515        if (fLongModeWithPaging)
     516            uValidEfer |= MSR_K6_EFER_LMA;
     517        bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
     518        if (   !fSvm
     519            || (!fLongModeSupported && fLongModeActiveOrEnabled)
     520            || (fLongModeWithPaging && !fPae)
     521            || (fLongModeWithPaging && !fProtMode)
     522            || (   fLongModeEnabled
     523                && fPaging
     524                && fPae
     525                && fLongModeConformCS))
     526        {
     527            Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
     528            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     529        }
     530
     531        /*
     532         * Preserve the required force-flags.
     533         *
     534         * We only preserve the force-flags that would affect the execution of the
     535         * nested-guest (or the guest).
     536         *
     537         *   - VMCPU_FF_INHIBIT_INTERRUPTS need -not- be preserved as it's for a single
     538         *     instruction which is this VMRUN instruction itself.
     539         *
     540         *   - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
     541         *     execution of a subsequent IRET instruction in the guest.
     542         *
     543         *   - The remaining FFs (e.g. timers) can stay in place so that we will be
     544         *     able to generate interrupts that should cause #VMEXITs for the
     545         *     nested-guest.
     546         */
     547        pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
     548
     549        /*
     550         * Interrupt shadow.
     551         */
     552        if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
     553        {
     554            LogFlow(("iemSvmVmrun: setting inerrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
     555            /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
     556            EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
     557        }
     558
     559        /*
     560         * TLB flush control.
     561         * Currently disabled since it's redundant as we unconditionally flush the TLB
     562         * in iemSvmHandleWorldSwitch() below.
     563         */
     564#if 0
     565        /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
     566        if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
     567            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
     568            || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
     569            PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
     570#endif
     571
     572        /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */
     573
     574        uint64_t const uOldEfer = pCtx->msrEFER;
     575        uint64_t const uOldCr0  = pCtx->cr0;
     576
     577        /*
     578         * Copy the remaining guest state from the VMCB to the guest-CPU context.
     579         */
     580        pCtx->gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;
     581        pCtx->gdtr.pGdt  = pVmcbNstGst->GDTR.u64Base;
     582        pCtx->idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;
     583        pCtx->idtr.pIdt  = pVmcbNstGst->IDTR.u64Base;
     584        pCtx->cr0        = pVmcbNstGst->u64CR0;   /** @todo What about informing PGM about CR0.WP? */
     585        pCtx->cr4        = pVmcbNstGst->u64CR4;
     586        pCtx->cr3        = pVmcbNstGst->u64CR3;
     587        pCtx->cr2        = pVmcbNstGst->u64CR2;
     588        pCtx->dr[6]      = pVmcbNstGst->u64DR6;
     589        pCtx->dr[7]      = pVmcbNstGst->u64DR7;
     590        pCtx->rflags.u64 = pVmcbNstGst->u64RFlags;
     591        pCtx->rax        = pVmcbNstGst->u64RAX;
     592        pCtx->rsp        = pVmcbNstGst->u64RSP;
     593        pCtx->rip        = pVmcbNstGst->u64RIP;
     594        pCtx->msrEFER    = uValidEfer;
     595
     596        /* Mask DR6, DR7 bits mandatory set/clear bits. */
     597        pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
     598        pCtx->dr[6] |= X86_DR6_RA1_MASK;
     599        pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
     600        pCtx->dr[7] |= X86_DR7_RA1_MASK;
     601
     602        /*
     603         * Check for pending virtual interrupts.
     604         */
     605        if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
     606            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     607        else
     608            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
     609
     610        /*
     611         * Clear global interrupt flags to allow interrupts in the guest.
     612         */
     613        pCtx->hwvirt.svm.fGif = 1;
     614
     615        /*
     616         * Inform PGM and others of the world-switch.
     617         */
     618        VBOXSTRICTRC rcStrict = iemSvmHandleWorldSwitch(pVCpu, uOldEfer, uOldCr0);
     619        if (rcStrict == VINF_SUCCESS)
     620        { /* likely */ }
     621        else if (RT_SUCCESS(rcStrict))
     622            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     623        else
     624        {
     625            LogFlow(("iemSvmVmrun: iemSvmHandleWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     626            return rcStrict;
     627        }
     628
     629        /*
     630         * Event injection.
     631         */
     632        PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
     633        pCtx->hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
     634        if (pEventInject->n.u1Valid)
     635        {
     636            uint8_t   const uVector    = pEventInject->n.u8Vector;
     637            TRPMEVENT const enmType    = HMSvmEventToTrpmEventType(pEventInject);
     638            uint16_t  const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
     639
     640            /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
     641            if (RT_UNLIKELY(enmType == TRPM_32BIT_HACK))
    368642            {
    369                 Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
     643                Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
    370644                return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    371645            }
    372 
    373             /* Nested paging. */
    374             if (    pVmcbCtrl->NestedPaging.n.u1NestedPaging
    375                 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
     646            if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
    376647            {
    377                 Log(("iemSvmVmrun: Nested paging not supported -> #VMEXIT\n"));
    378                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    379             }
    380 
    381             /* AVIC. */
    382             if (    pVmcbCtrl->IntCtrl.n.u1AvicEnable
    383                 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
    384             {
    385                 Log(("iemSvmVmrun: AVIC not supported -> #VMEXIT\n"));
    386                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    387             }
    388 
    389             /* Last branch record (LBR) virtualization. */
    390             if (    (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
    391                 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
    392             {
    393                 Log(("iemSvmVmrun: LBR virtualization not supported -> #VMEXIT\n"));
    394                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    395             }
    396 
    397             /* Guest ASID. */
    398             if (!pVmcbCtrl->TLBCtrl.n.u32ASID)
    399             {
    400                 Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n"));
    401                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    402             }
    403 
    404             /* IO permission bitmap. */
    405             RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;
    406             if (   (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)
    407                 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)
    408                 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)
    409                 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))
    410             {
    411                 Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));
    412                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    413             }
    414 
    415             /* MSR permission bitmap. */
    416             RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;
    417             if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
    418                 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)
    419                 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))
    420             {
    421                 Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));
    422                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    423             }
    424 
    425             /* CR0. */
    426             if (   !(VmcbNstGst.u64CR0 & X86_CR0_CD)
    427                 &&  (VmcbNstGst.u64CR0 & X86_CR0_NW))
    428             {
    429                 Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
    430                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    431             }
    432             if (VmcbNstGst.u64CR0 >> 32)
    433             {
    434                 Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));
    435                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    436             }
    437             /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */
    438 
    439             /* DR6 and DR7. */
    440             if (   VmcbNstGst.u64DR6 >> 32
    441                 || VmcbNstGst.u64DR7 >> 32)
    442             {
    443                 Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64DR6,
    444                      VmcbNstGst.u64DR6));
    445                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    446             }
    447 
    448             /** @todo gPAT MSR validation? */
    449 
    450             /*
    451              * Copy the IO permission bitmap into the cache.
    452              */
    453             Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));
    454             rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
    455                                          SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
    456             if (RT_FAILURE(rc))
    457             {
    458                 Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
    459                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    460             }
    461 
    462             /*
    463              * Copy the MSR permission bitmap into the cache.
    464              */
    465             Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
    466             rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
    467                                          SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
    468             if (RT_FAILURE(rc))
    469             {
    470                 Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
    471                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    472             }
    473 
    474             /*
    475              * Copy segments from nested-guest VMCB state to the guest-CPU state.
    476              *
    477              * We do this here as we need to use the CS attributes and it's easier this way
    478              * then using the VMCB format selectors. It doesn't really matter where we copy
    479              * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.
    480              */
    481             HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, ES, es);
    482             HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, CS, cs);
    483             HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, SS, ss);
    484             HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, DS, ds);
    485 
    486             /** @todo Segment attribute overrides by VMRUN. */
    487 
    488             /*
    489              * CPL adjustments and overrides.
    490              *
    491              * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().
    492              * We shall thus adjust both CS.DPL and SS.DPL here.
    493              */
    494             pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = VmcbNstGst.u8CPL;
    495             if (CPUMIsGuestInV86ModeEx(pCtx))
    496                 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;
    497             if (CPUMIsGuestInRealModeEx(pCtx))
    498                 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;
    499 
    500             Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
    501 
    502             /*
    503              * Continue validating guest-state and controls.
    504              */
    505             /* EFER, CR0 and CR4. */
    506             uint64_t uValidEfer;
    507             rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, VmcbNstGst.u64EFER, VmcbNstGst.u64EFER, &uValidEfer);
    508             if (RT_FAILURE(rc))
    509             {
    510                 Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64EFER));
    511                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    512             }
    513             bool const fSvm                     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
    514             bool const fLongModeSupported       = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
    515             bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
    516             bool const fPaging                  = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PG);
    517             bool const fPae                     = RT_BOOL(VmcbNstGst.u64CR4 & X86_CR4_PAE);
    518             bool const fProtMode                = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PE);
    519             bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
    520             bool const fLongModeConformCS       = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;
    521             /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */
    522             if (fLongModeWithPaging)
    523                 uValidEfer |= MSR_K6_EFER_LMA;
    524             bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
    525             if (   !fSvm
    526                 || (!fLongModeSupported && fLongModeActiveOrEnabled)
    527                 || (fLongModeWithPaging && !fPae)
    528                 || (fLongModeWithPaging && !fProtMode)
    529                 || (   fLongModeEnabled
    530                     && fPaging
    531                     && fPae
    532                     && fLongModeConformCS))
    533             {
    534                 Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));
    535                 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    536             }
    537 
    538             /*
    539              * Preserve the required force-flags.
    540              *
    541              * We only preserve the force-flags that would affect the execution of the
    542              * nested-guest (or the guest).
    543              *
    544              *   - VMCPU_FF_INHIBIT_INTERRUPTS need -not- be preserved as it's for a single
    545              *     instruction which is this VMRUN instruction itself.
    546              *
    547              *   - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
    548              *     execution of a subsequent IRET instruction in the guest.
    549              *
    550              *   - The remaining FFs (e.g. timers) can stay in place so that we will be
    551              *     able to generate interrupts that should cause #VMEXITs for the
    552              *     nested-guest.
    553              */
    554             pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
    555 
    556             /*
    557              * Interrupt shadow.
    558              */
    559             if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    560             {
    561                 LogFlow(("iemSvmVmrun: setting inerrupt shadow. inhibit PC=%#RX64\n", VmcbNstGst.u64RIP));
    562                 /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
    563                 EMSetInhibitInterruptsPC(pVCpu, VmcbNstGst.u64RIP);
    564             }
    565 
    566             /*
    567              * TLB flush control.
    568              * Currently disabled since it's redundant as we unconditionally flush the TLB
    569              * in iemSvmHandleWorldSwitch() below.
    570              */
    571 #if 0
    572             /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
    573             if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
    574                 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
    575                 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
    576                 PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);
    577 #endif
    578 
    579             /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */
    580 
    581             uint64_t const uOldEfer = pCtx->msrEFER;
    582             uint64_t const uOldCr0  = pCtx->cr0;
    583 
    584             /*
    585              * Copy the remaining guest state from the VMCB to the guest-CPU context.
    586              */
    587             pCtx->gdtr.cbGdt = VmcbNstGst.GDTR.u32Limit;
    588             pCtx->gdtr.pGdt  = VmcbNstGst.GDTR.u64Base;
    589             pCtx->idtr.cbIdt = VmcbNstGst.IDTR.u32Limit;
    590             pCtx->idtr.pIdt  = VmcbNstGst.IDTR.u64Base;
    591             pCtx->cr0        = VmcbNstGst.u64CR0;   /** @todo What about informing PGM about CR0.WP? */
    592             pCtx->cr4        = VmcbNstGst.u64CR4;
    593             pCtx->cr3        = VmcbNstGst.u64CR3;
    594             pCtx->cr2        = VmcbNstGst.u64CR2;
    595             pCtx->dr[6]      = VmcbNstGst.u64DR6;
    596             pCtx->dr[7]      = VmcbNstGst.u64DR7;
    597             pCtx->rflags.u64 = VmcbNstGst.u64RFlags;
    598             pCtx->rax        = VmcbNstGst.u64RAX;
    599             pCtx->rsp        = VmcbNstGst.u64RSP;
    600             pCtx->rip        = VmcbNstGst.u64RIP;
    601             pCtx->msrEFER    = uValidEfer;
    602 
    603             /* Mask DR6, DR7 bits mandatory set/clear bits. */
    604             pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
    605             pCtx->dr[6] |= X86_DR6_RA1_MASK;
    606             pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
    607             pCtx->dr[7] |= X86_DR7_RA1_MASK;
    608 
    609             /*
    610              * Check for pending virtual interrupts.
    611              */
    612             if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)
    613                 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    614             else
    615                 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
    616 
    617             /*
    618              * Clear global interrupt flags to allow interrupts in the guest.
    619              */
    620             pCtx->hwvirt.svm.fGif = 1;
    621 
    622             /*
    623              * Inform PGM and others of the world-switch.
    624              */
    625             VBOXSTRICTRC rcStrict = iemSvmHandleWorldSwitch(pVCpu, uOldEfer, uOldCr0);
    626             if (rcStrict == VINF_SUCCESS)
    627             { /* likely */ }
    628             else if (RT_SUCCESS(rcStrict))
    629                 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    630             else
    631             {
    632                 LogFlow(("iemSvmVmrun: iemSvmHandleWorldSwitch unexpected failure. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    633                 return rcStrict;
    634             }
    635 
    636             /*
    637              * Event injection.
    638              */
    639             PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;
    640             pCtx->hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;
    641             if (pEventInject->n.u1Valid)
    642             {
    643                 uint8_t   const uVector    = pEventInject->n.u8Vector;
    644                 TRPMEVENT const enmType    = HMSvmEventToTrpmEventType(pEventInject);
    645                 uint16_t  const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;
    646 
    647                 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */
    648                 if (enmType == TRPM_32BIT_HACK)
     648                if (   uVector == X86_XCPT_NMI
     649                    || uVector > X86_XCPT_LAST)
    649650                {
    650                     Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));
     651                    Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
    651652                    return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    652653                }
    653                 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)
     654                if (   uVector == X86_XCPT_BR
     655                    && CPUMIsGuestInLongModeEx(pCtx))
    654656                {
    655                     if (   uVector == X86_XCPT_NMI
    656                         || uVector > X86_XCPT_LAST)
    657                     {
    658                         Log(("iemSvmVmrun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));
    659                         return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    660                     }
    661                     if (   uVector == X86_XCPT_BR
    662                         && CPUMIsGuestInLongModeEx(pCtx))
    663                     {
    664                         Log(("iemSvmVmrun: Cannot inject #BR when not in long mode -> #VMEXIT\n"));
    665                         return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    666                     }
    667                     /** @todo any others? */
     657                    Log(("iemSvmVmrun: Cannot inject #BR when not in long mode -> #VMEXIT\n"));
     658                    return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    668659                }
    669 
    670                 /*
    671                  * Update the exit interruption info field so that if an exception occurs
    672                  * while delivering the event causing a #VMEXIT, we only need to update
    673                  * the valid bit while the rest is already in place.
    674                  */
    675                 pVmcbCtrl->ExitIntInfo.u = pVmcbCtrl->EventInject.u;
    676                 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
    677 
    678                 /** @todo NRIP: Software interrupts can only be pushed properly if we support
    679                  *        NRIP for the nested-guest to calculate the instruction length
    680                  *        below. */
    681                 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 uVector=%#x enmType=%d uErrorCode=%u cr2=%#RX64\n",
    682                          pCtx->cs.Sel, pCtx->rip, uVector, enmType,uErrorCode, pCtx->cr2));
    683                 rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
     660                /** @todo any others? */
    684661            }
    685             else
    686                 LogFlow(("iemSvmVmrun: Entered nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
    687                          pCtx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));
    688 
    689             return rcStrict;
    690         }
    691 
    692         /* Shouldn't really happen as the caller should've validated the physical address already. */
    693         Log(("iemSvmVmrun: Failed to read nested-guest VMCB control area at %#RGp -> #VMEXIT\n",
    694              GCPhysVmcb));
    695         return VERR_SVM_IPE_4;
     662
     663            /*
     664             * Update the exit interruption info field so that if an exception occurs
     665             * while delivering the event causing a #VMEXIT, we only need to update
     666             * the valid bit while the rest is already in place.
     667             */
     668            pVmcbCtrl->ExitIntInfo.u = pVmcbCtrl->EventInject.u;
     669            pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
     670
     671            /** @todo NRIP: Software interrupts can only be pushed properly if we support
     672             *        NRIP for the nested-guest to calculate the instruction length
     673             *        below. */
     674            LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 uVector=%#x enmType=%d uErrorCode=%u cr2=%#RX64\n",
     675                     pCtx->cs.Sel, pCtx->rip, uVector, enmType,uErrorCode, pCtx->cr2));
     676            rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
     677        }
     678        else
     679            LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
     680                     pCtx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));
     681
     682        return rcStrict;
    696683    }
    697684
    698685    /* Shouldn't really happen as the caller should've validated the physical address already. */
    699     Log(("iemSvmVmrun: Failed to read nested-guest VMCB save-state area at %#RGp -> #VMEXIT\n",
    700          GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest)));
    701     return VERR_IEM_IPE_1;
     686    Log(("iemSvmVmrun: Failed to read nested-guest VMCB at %#RGp (rc=%Rrc) -> #VMEXIT\n", GCPhysVmcb, rc));
     687    return rc;
     688#endif
    702689}
    703690
     
    11161103{
    11171104#ifndef IN_RING3
     1105    RT_NOREF2(pVCpu, cbInstr);
    11181106    return VINF_EM_RESCHEDULE_REM;
    1119 #endif
    1120 
     1107#else
    11211108    LogFlow(("iemCImpl_vmrun\n"));
    11221109    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     
    11441131    }
    11451132    return rcStrict;
     1133#endif
    11461134}
    11471135
     
    11781166{
    11791167#ifndef IN_RING3
     1168    RT_NOREF2(pVCpu, cbInstr);
    11801169    return VINF_EM_RAW_EMULATE_INSTR;
    1181 #endif
    1182 
     1170#else
    11831171    LogFlow(("iemCImpl_vmload\n"));
    11841172    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     
    12231211    }
    12241212    return rcStrict;
     1213#endif
    12251214}
    12261215
     
    12321221{
    12331222#ifndef IN_RING3
     1223    RT_NOREF2(pVCpu, cbInstr);
    12341224    return VINF_EM_RAW_EMULATE_INSTR;
    1235 #endif
    1236 
     1225#else
    12371226    LogFlow(("iemCImpl_vmsave\n"));
    12381227    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     
    12801269    }
    12811270    return rcStrict;
     1271#endif
    12821272}
    12831273
     
    12891279{
    12901280#ifndef IN_RING3
     1281    RT_NOREF2(pVCpu, cbInstr);
    12911282    return VINF_EM_RESCHEDULE_REM;
    1292 #endif
    1293 
     1283#else
    12941284    LogFlow(("iemCImpl_clgi\n"));
    12951285    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     
    13031293    pCtx->hwvirt.svm.fGif = 0;
    13041294    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1305 #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     1295# if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    13061296    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
     1297# else
     1298    return VINF_SUCCESS;
     1299# endif
    13071300#endif
    1308     return VINF_SUCCESS;
    13091301}
    13101302
     
    13161308{
    13171309#ifndef IN_RING3
     1310    RT_NOREF2(pVCpu, cbInstr);
    13181311    return VINF_EM_RESCHEDULE_REM;
    1319 #endif
    1320 
     1312#else
    13211313    LogFlow(("iemCImpl_stgi\n"));
    13221314    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     
    13301322    pCtx->hwvirt.svm.fGif = 1;
    13311323    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1332 #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     1324# if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    13331325    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
    1334 #else
     1326# else
    13351327    return VINF_SUCCESS;
     1328# endif
    13361329#endif
    13371330}
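
Beyond the #ifndef restructuring, the iemSvmVmrun hunks also change how the nested-guest VMCB is fetched: instead of reading the guest save-state area into a stack copy and the control area with a separate read, the whole VMCB is read once into the cached copy kept in the guest-CPU context, and both areas are then referenced through that cache. Condensed from the hunks above (pVM, pCtx and GCPhysVmcb are in scope in iemSvmVmrun; surrounding context trimmed):

    /* Before (r67925): only the guest save-state area was read, into a local copy,
     *     SVMVMCBSTATESAVE VmcbNstGst;
     *     int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst,
     *                                      GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest),
     *                                      sizeof(SVMVMCBSTATESAVE));
     * and the control area was fetched later with a second PGMPhysSimpleReadGCPhys. */

    /* After (r67945): read the entire VMCB into the cached copy and take pointers
       to its control and guest save-state areas. */
    int rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
    if (RT_SUCCESS(rc))
    {
        PSVMVMCBCTRL      pVmcbCtrl   = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PSVMVMCBSTATESAVE pVmcbNstGst = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->guest;
        /* ... validation, world switch and event injection continue as in the hunks above ... */
    }

This trades two guest-physical reads for one and keeps the nested-guest save-state fields in the per-VCPU VMCB cache rather than on the stack of iemSvmVmrun.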