VirtualBox

Changeset 68226 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
Aug 2, 2017 9:02:00 AM
Author:
vboxsync
Message:

VMM: Nested Hw.virt: SVM R0 bits.
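
In rough terms, the diff below threads the VMCB through the ring-0 helpers explicitly instead of letting each helper fetch pVCpu->hm.s.svm.pvVmcb itself (the member is also retyped to PSVMVMCB pVmcb), so the same helpers can operate on either the guest VMCB or the nested-guest VMCB (pCtx->hwvirt.svm.CTX_SUFF(pVmcb)), and nested-guest counterparts are added for state loading, event evaluation and the pre-run path. A minimal standalone sketch of that parameterization, using hypothetical simplified types rather than the real VirtualBox structures:

    #include <stdint.h>

    /* Hypothetical, heavily simplified stand-ins for the real VMCB/VCPU types. */
    typedef struct VMCB { uint64_t u64InterceptCtrl; } VMCB;
    typedef struct VCPU { VMCB *pVmcbGst; VMCB *pVmcbNstGst; } VCPU;

    /* Before: the helper digs the guest VMCB out of the VCPU, so it can only
       ever touch the guest VMCB. */
    static void SetInterceptOld(VCPU *pVCpu, uint64_t fIntercept)
    {
        pVCpu->pVmcbGst->u64InterceptCtrl |= fIntercept;
    }

    /* After: the caller chooses which VMCB the helper operates on, so the same
       code serves both the guest and the nested-guest control block. */
    static void SetIntercept(VMCB *pVmcb, uint64_t fIntercept)
    {
        pVmcb->u64InterceptCtrl |= fIntercept;
    }

This is the pattern visible below in hmR0SvmSetMsrPermission, hmR0SvmUpdateTscOffsetting, hmR0SvmInjectPendingEvent and hmR0SvmSaveGuestState, which all gain a VMCB (and, for the MSR case, an MSR-bitmap) parameter.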

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r67875 → r68226
    231231*   Internal Functions                                                                                                           *
    232232*********************************************************************************************************************************/
    233 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
     233static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
     234                                    SVMMSREXITWRITE enmWrite);
    234235static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
    235236static void hmR0SvmLeave(PVMCPU pVCpu);
     
    281282/** @} */
    282283
    283 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
    284 
     284static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
     285#ifdef VBOX_WITH_NESTED_HWVIRT
     286static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
     287static int hmR0SvmExecVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
     288#endif
    285289
    286290/*********************************************************************************************************************************
     
    456460        {
    457461            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
    458             pVCpu->hm.s.svm.pvVmcbHost       = 0;
    459462            pVCpu->hm.s.svm.HCPhysVmcbHost   = 0;
    460463            pVCpu->hm.s.svm.hMemObjVmcbHost  = NIL_RTR0MEMOBJ;
     
    464467        {
    465468            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
    466             pVCpu->hm.s.svm.pvVmcb           = 0;
     469            pVCpu->hm.s.svm.pVmcb            = NULL;
    467470            pVCpu->hm.s.svm.HCPhysVmcb       = 0;
    468471            pVCpu->hm.s.svm.hMemObjVmcb      = NIL_RTR0MEMOBJ;
     
    472475        {
    473476            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
    474             pVCpu->hm.s.svm.pvMsrBitmap      = 0;
     477            pVCpu->hm.s.svm.pvMsrBitmap      = NULL;
    475478            pVCpu->hm.s.svm.HCPhysMsrBitmap  = 0;
    476479            pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
     
    525528            goto failure_cleanup;
    526529
    527         pVCpu->hm.s.svm.pvVmcbHost     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
     530        void *pvVmcbHost               = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
    528531        pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0 /* iPage */);
    529532        Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
    530         ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
     533        ASMMemZeroPage(pvVmcbHost);
    531534
    532535        /*
     
    537540            goto failure_cleanup;
    538541
    539         pVCpu->hm.s.svm.pvVmcb          = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
     542        pVCpu->hm.s.svm.pVmcb           = (PSVMVMCB)RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
    540543        pVCpu->hm.s.svm.HCPhysVmcb      = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
    541544        Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
    542         ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
     545        ASMMemZeroPage(pVCpu->hm.s.svm.pVmcb);
    543546
    544547        /*
     
    555558        /* Set all bits to intercept all MSR accesses (changed later on). */
    556559        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
    557     }
     560   }
    558561
    559562    return VINF_SUCCESS;
     
    582585 *
    583586 * @param   pVCpu       The cross context virtual CPU structure.
     587 * @param   pCtx        Pointer to the guest CPU or nested-guest CPU context.
    584588 * @param   uMsr        The MSR for which the access permissions are being set.
    585589 * @param   enmRead     MSR read permissions.
    586590 * @param   enmWrite    MSR write permissions.
    587591 */
    588 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
     592static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
     593                                    SVMMSREXITWRITE enmWrite)
    589594{
    590595    uint16_t offMsrpm;
     
    596601    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
    597602
    598     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    599603    pbMsrBitmap += offMsrpm;
    600 
    601604    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
    602605        ASMBitSet(pbMsrBitmap, uMsrpmBit);
     
    609612        ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
    610613
    611     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    612614    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    613615}
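
In the AMD-V MSR permission map each MSR is described by two adjacent bits: the read-intercept bit at uMsrpmBit and the write-intercept bit at uMsrpmBit + 1, which is why the hunk above sets or clears those two positions and then clears HMSVM_VMCB_CLEAN_IOPM_MSRPM so the CPU re-reads the bitmap on the next VMRUN. A standalone sketch of that two-bit scheme (the per-MSR offset/bit lookup used by the real code is not shown in this hunk, so a plain bit index stands in for it):

    #include <stdint.h>

    /* Sets the read/write intercept bits for one MSR in a 2-bits-per-MSR
       permission bitmap. uMsrpmBit is the bit index of the read-intercept bit;
       the real code derives it (plus a page offset) from the MSR number. */
    static void SetMsrPermission(uint8_t *pbMsrBitmap, uint32_t uMsrpmBit,
                                 int fInterceptRead, int fInterceptWrite)
    {
        if (fInterceptRead)
            pbMsrBitmap[uMsrpmBit >> 3] |=  (uint8_t)(1u << (uMsrpmBit & 7));
        else
            pbMsrBitmap[uMsrpmBit >> 3] &= (uint8_t)~(1u << (uMsrpmBit & 7));

        uMsrpmBit++;   /* the write-intercept bit immediately follows the read bit */
        if (fInterceptWrite)
            pbMsrBitmap[uMsrpmBit >> 3] |=  (uint8_t)(1u << (uMsrpmBit & 7));
        else
            pbMsrBitmap[uMsrpmBit >> 3] &= (uint8_t)~(1u << (uMsrpmBit & 7));
    }

SVMMSREXIT_INTERCEPT_* in the code above corresponds to setting the respective bit and SVMMSREXIT_PASSTHRU_* to clearing it; the bitmap starts out all-ones (intercept everything), as the allocation hunk earlier shows.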
     
    634636    {
    635637        PVMCPU   pVCpu = &pVM->aCpus[i];
    636         PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
     638        PSVMVMCB pVmcb = pVM->aCpus[i].hm.s.svm.pVmcb;
    637639
    638640        AssertMsgReturn(pVmcb, ("Invalid pVmcb for vcpu[%u]\n", i), VERR_SVM_INVALID_PVMCB);
     
    767769         * Don't intercept guest read/write accesses to these MSRs.
    768770         */
    769         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    770         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    771         hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    772         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    773         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    774         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    775         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    776         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    777         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    778         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     771        uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     772        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     773        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     774        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     775        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     776        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     777        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     778        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     779        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     780        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     781        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    779782    }
    780783
     
    803806        Log4(("SVMR0InvalidatePage %RGv\n", GCVirt));
    804807
    805         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     808        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    806809        AssertMsgReturn(pVmcb, ("Invalid pVmcb!\n"), VERR_SVM_INVALID_PVMCB);
    807810
     
    829832{
    830833    PVM pVM              = pVCpu->CTX_SUFF(pVM);
    831     PSVMVMCB pVmcb       = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     834    PSVMVMCB pVmcb       = pVCpu->hm.s.svm.pVmcb;
    832835    PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
    833836
     
    10961099static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    10971100{
     1101    uint64_t u64GuestCR0 = pCtx->cr0;
     1102
     1103    /* Always enable caching. */
     1104    u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
     1105
    10981106    /*
    1099      * Guest CR0.
     1107     * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
    11001108     */
    1101     PVM pVM = pVCpu->CTX_SUFF(pVM);
    1102     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    1103     {
    1104         uint64_t u64GuestCR0 = pCtx->cr0;
    1105 
    1106         /* Always enable caching. */
    1107         u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
    1108 
    1109         /*
    1110          * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
    1111          */
    1112         if (!pVM->hm.s.fNestedPaging)
    1113         {
    1114             u64GuestCR0 |= X86_CR0_PG;     /* When Nested Paging is not available, use shadow page tables. */
    1115             u64GuestCR0 |= X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
    1116         }
    1117 
    1118         /*
    1119          * Guest FPU bits.
    1120          */
    1121         bool fInterceptNM = false;
    1122         bool fInterceptMF = false;
    1123         u64GuestCR0 |= X86_CR0_NE;         /* Use internal x87 FPU exceptions handling rather than external interrupts. */
    1124         if (CPUMIsGuestFPUStateActive(pVCpu))
    1125         {
    1126             /* Catch floating point exceptions if we need to report them to the guest in a different way. */
    1127             if (!(pCtx->cr0 & X86_CR0_NE))
    1128             {
    1129                 Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
    1130                 fInterceptMF = true;
    1131             }
    1132         }
    1133         else
    1134         {
    1135             fInterceptNM = true;           /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
    1136             u64GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
    1137                           | X86_CR0_MP;    /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
    1138         }
    1139 
    1140         /*
    1141          * Update the exception intercept bitmap.
    1142          */
    1143         if (fInterceptNM)
    1144             hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
    1145         else
    1146             hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
    1147 
    1148         if (fInterceptMF)
    1149             hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
    1150         else
    1151             hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
    1152 
    1153         pVmcb->guest.u64CR0 = u64GuestCR0;
    1154         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1155         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
    1156     }
     1109    if (!pVmcb->ctrl.NestedPaging.n.u1NestedPaging)
     1110    {
     1111        u64GuestCR0 |= X86_CR0_PG;     /* When Nested Paging is not available, use shadow page tables. */
     1112        u64GuestCR0 |= X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */
     1113    }
     1114
     1115    /*
     1116     * Guest FPU bits.
     1117     */
     1118    bool fInterceptNM = false;
     1119    bool fInterceptMF = false;
     1120    u64GuestCR0 |= X86_CR0_NE;         /* Use internal x87 FPU exceptions handling rather than external interrupts. */
     1121    if (CPUMIsGuestFPUStateActive(pVCpu))
     1122    {
     1123        /* Catch floating point exceptions if we need to report them to the guest in a different way. */
     1124        if (!(pCtx->cr0 & X86_CR0_NE))
     1125        {
     1126            Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
     1127            fInterceptMF = true;
     1128        }
     1129    }
     1130    else
     1131    {
     1132        fInterceptNM = true;           /* Guest FPU inactive, #VMEXIT on #NM for lazy FPU loading. */
     1133        u64GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
     1134                      | X86_CR0_MP;    /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
     1135    }
     1136
     1137    /*
     1138     * Update the exception intercept bitmap.
     1139     */
     1140    if (fInterceptNM)
     1141        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
     1142    else
     1143        hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
     1144
     1145    if (fInterceptMF)
     1146        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
     1147    else
     1148        hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
     1149
     1150    pVmcb->guest.u64CR0 = u64GuestCR0;
     1151    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    11571152}
    11581153
     
    12631258
    12641259
     1260#ifdef VBOX_WITH_NESTED_HWVIRT
     1261/**
     1262 * Loads the nested-guest control registers (CR2, CR3, CR4) into the VMCB.
     1263 *
     1264 * @param   pVCpu           The cross context virtual CPU structure.
     1265 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
     1266 * @param   pCtx            Pointer to the guest-CPU context.
     1267 *
     1268 * @remarks No-long-jump zone!!!
     1269 */
     1270static void hmR0SvmLoadGuestControlRegsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
     1271{
     1272    /*
     1273     * Guest CR2.
     1274     */
     1275    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
     1276    {
     1277        pVmcbNstGst->guest.u64CR2 = pCtx->cr2;
     1278        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
     1279        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
     1280    }
     1281
     1282    /*
     1283     * Guest CR3.
     1284     */
     1285    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
     1286    {
     1287        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmNestedPaging);
     1288        pVmcbNstGst->guest.u64CR3 = pCtx->cr3;
     1289        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     1290        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
     1291    }
     1292
     1293    /*
     1294     * Guest CR4.
     1295     * ASSUMES this is done everytime we get in from ring-3! (XCR0)
     1296     */
     1297    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
     1298    {
     1299        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmNestedPaging);
     1300        pVmcbNstGst->guest.u64CR4 = pCtx->cr4;
     1301        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
     1302
     1303        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+nested-guest XCR0. */
     1304        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     1305
     1306        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
     1307    }
     1308}
     1309#endif
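
The new nested-guest control-register loader above follows the VMCB clean-bits protocol used throughout this file: whenever a VMCB field the CPU may have cached is changed, the matching HMSVM_VMCB_CLEAN_* bit is cleared so the next VMRUN reloads that state from memory. A minimal sketch of the pattern, with a made-up clean-bit value rather than the real bit assignments:

    #include <stdint.h>

    /* Hypothetical cut-down VMCB with just CR2 and the clean-bits field. */
    typedef struct VMCB
    {
        uint64_t u64CR2;
        uint64_t u64VmcbCleanBits;
    } VMCB;

    #define VMCB_CLEAN_CR2  UINT64_C(0x200)   /* made-up bit for this sketch */

    static void LoadGuestCr2(VMCB *pVmcb, uint64_t uGuestCr2)
    {
        pVmcb->u64CR2 = uGuestCr2;
        /* The field changed: mark it dirty so the CPU re-reads it on the next VMRUN. */
        pVmcb->u64VmcbCleanBits &= ~VMCB_CLEAN_CR2;
    }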
     1310
     1311
    12651312/**
    12661313 * Loads the guest segment registers into the VMCB.
     
    13671414        }
    13681415    }
    1369 
    13701416
    13711417    /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
     
    13801426
    13811427/**
    1382  * Loads the guest state into the VMCB and programs the necessary intercepts
    1383  * accordingly.
     1428 * Loads the guest (or nested-guest) debug state into the VMCB and programs the
     1429 * necessary intercepts accordingly.
    13841430 *
    13851431 * @param   pVCpu       The cross context virtual CPU structure.
     
    13921438static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    13931439{
    1394     if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    1395         return;
    1396     Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
    1397     Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
     1440    Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK);
     1441    Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
     1442    Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
     1443    Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
    13981444
    13991445    bool fInterceptMovDRx = false;
     
    15361582        }
    15371583    }
    1538 
    1539     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
    1540 }
    1541 
     1584}
     1585
     1586
     1587#ifdef VBOX_WITH_NESTED_HWVIRT
     1588/**
     1589 * Loads the nested-guest APIC state (currently just the TPR).
     1590 *
     1591 * @param   pVCpu           The cross context virtual CPU structure.
     1592 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
     1593 */
     1594static void hmR0SvmLoadGuestApicStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst)
     1595{
     1596    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
     1597    {
     1598        /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */
     1599        pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking = 1;
     1600        pVCpu->hm.s.svm.fSyncVTpr = false;
     1601        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_TPR;
     1602
     1603        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
     1604    }
     1605}
     1606#endif
    15421607
    15431608/**
     
    15731638        {
    15741639            pCtx->msrLSTAR = u8Tpr;
     1640            uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    15751641
    15761642            /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
    15771643            if (fPendingIntr)
    1578                 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1644                hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
    15791645            else
    15801646            {
    1581                 hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1647                hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    15821648                pVCpu->hm.s.svm.fSyncVTpr = true;
    15831649            }
     
    16071673
    16081674/**
    1609  * Loads the exception interrupts required for guest execution in the VMCB.
    1610  *
    1611  * @returns VBox status code.
     1675 * Loads the exception interrupts required for guest (or nested-guest) execution in
     1676 * the VMCB.
     1677 *
    16121678 * @param   pVCpu       The cross context virtual CPU structure.
    16131679 * @param   pVmcb       Pointer to the VM control block.
    1614  * @param   pCtx        Pointer to the guest-CPU context.
    1615  */
    1616 static int hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    1617 {
    1618     NOREF(pCtx);
     1680 */
     1681static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb)
     1682{
    16191683    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
    16201684    {
     
    16341698        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
    16351699    }
    1636     return VINF_SUCCESS;
    1637 }
     1700}
     1701
     1702
     1703#ifdef VBOX_WITH_NESTED_HWVIRT
     1704/**
     1705 * Loads the intercepts required for nested-guest execution in the VMCB.
     1706 *
     1707 * This merges the guest and nested-guest intercepts in a way that if the outer
     1708 * guest intercepts an exception we need to intercept it in the nested-guest as
     1709 * well and handle it accordingly.
     1710 *
     1711 * @param   pVCpu           The cross context virtual CPU structure.
     1712 * @param   pVmcb           Pointer to the VM control block.
     1713 * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
     1714 */
     1715static void hmR0SvmMergeIntercepts(PVMCPU pVCpu, PCSVMVMCB pVmcb, PSVMVMCB pVmcbNstGst)
     1716{
     1717    RT_NOREF(pVCpu);
     1718#if 0
     1719    pVmcbNstGst->ctrl.u16InterceptRdCRx |= pVmcb->ctrl.u16InterceptRdCRx;
     1720    pVmcbNstGst->ctrl.u16InterceptWrCRx |= pVmcb->ctrl.u16InterceptWrCRx;
     1721    pVmcbNstGst->ctrl.u16InterceptRdDRx |= pVmcb->ctrl.u16InterceptRdDRx;
     1722    pVmcbNstGst->ctrl.u16InterceptWrDRx |= pVmcb->ctrl.u16InterceptWrDRx;
     1723#endif
     1724    pVmcbNstGst->ctrl.u32InterceptXcpt  |= pVmcb->ctrl.u32InterceptXcpt;
     1725    pVmcbNstGst->ctrl.u64InterceptCtrl  |= pVmcb->ctrl.u64InterceptCtrl;
     1726}
     1727#endif
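
hmR0SvmMergeIntercepts above simply ORs the outer guest's exception and control intercepts into the nested-guest VMCB, so anything HM itself needs to intercept still causes a #VMEXIT while the nested-guest runs; the CRx/DRx read/write intercept merging is left disabled under #if 0 in this revision. A standalone sketch of the OR-merge with cut-down mask fields:

    #include <stdint.h>

    /* Hypothetical cut-down intercept state; the real VMCB control area has more fields. */
    typedef struct VMCBCTRL
    {
        uint32_t u32InterceptXcpt;   /* exception intercept bitmap */
        uint64_t u64InterceptCtrl;   /* control/instruction intercepts */
    } VMCBCTRL;

    /* The nested-guest must intercept at least everything the outer HM setup
       intercepts, otherwise those events would be delivered to the nested-guest
       instead of producing a #VMEXIT we can handle. */
    static void MergeIntercepts(const VMCBCTRL *pGst, VMCBCTRL *pNstGst)
    {
        pNstGst->u32InterceptXcpt |= pGst->u32InterceptXcpt;
        pNstGst->u64InterceptCtrl |= pGst->u64InterceptCtrl;
    }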
    16381728
    16391729
     
    16431733 * @returns VBox status code.
    16441734 * @param   pVCpu   The cross context virtual CPU structure.
    1645  * @param   pCtx    Pointer to the guest-CPU context.
    16461735 *
    16471736 * @remarks No-long-jump zone!!!
    16481737 */
    1649 static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pCtx)
    1650 {
    1651     if (CPUMIsGuestInLongModeEx(pCtx))
     1738static int hmR0SvmSetupVMRunHandler(PVMCPU pVCpu)
     1739{
     1740    if (CPUMIsGuestInLongMode(pVCpu))
    16521741    {
    16531742#ifndef VBOX_ENABLE_64_BITS_GUESTS
     
    18001889static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    18011890{
    1802     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1891    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    18031892    AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
    18041893
     
    18191908    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    18201909
    1821     rc = hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
    1822     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    1823 
    1824     rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
     1910    hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb);
     1911
     1912    rc = hmR0SvmSetupVMRunHandler(pVCpu);
    18251913    AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    18261914
     
    18491937
    18501938
    1851 /**
    1852  * Loads the state shared between the host and guest into the
     1939#ifdef VBOX_WITH_NESTED_HWVIRT
     1940/**
     1941 * Loads the nested-guest state into the VMCB.
     1942 *
     1943 * @returns VBox status code.
     1944 * @param   pVCpu       The cross context virtual CPU structure.
     1945 * @param   pCtx        Pointer to the guest-CPU context.
     1946 *
     1947 * @remarks No-long-jump zone!!!
     1948 */
     1949static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     1950{
     1951    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
     1952
     1953    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     1954    Assert(pVmcbNstGst);
     1955
     1956    /*
     1957     * If we just emulated VMRUN, the VMCB is already in-sync with the guest-CPU context.
     1958     */
     1959    if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
     1960    {
     1961        hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
     1962        hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     1963        hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);
     1964
     1965        pVmcbNstGst->guest.u64RIP    = pCtx->rip;
     1966        pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
     1967        pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
     1968        pVmcbNstGst->guest.u64RAX    = pCtx->rax;
     1969    }
     1970
     1971    hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst);
     1972    hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcbNstGst);
     1973
     1974    int rc = hmR0SvmSetupVMRunHandler(pVCpu);
     1975    AssertRCReturn(rc, rc);
     1976
     1977    /* Clear any unused and reserved bits. */
     1978    HMCPU_CF_CLEAR(pVCpu,   HM_CHANGED_GUEST_RIP                  /* Unused (loaded unconditionally). */
     1979                          | HM_CHANGED_GUEST_RSP
     1980                          | HM_CHANGED_GUEST_RFLAGS
     1981                          | HM_CHANGED_GUEST_SYSENTER_CS_MSR
     1982                          | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
     1983                          | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
     1984                          | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
     1985                          | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
     1986                          | HM_CHANGED_SVM_RESERVED2
     1987                          | HM_CHANGED_SVM_RESERVED3
     1988                          | HM_CHANGED_SVM_RESERVED4);
     1989
     1990    /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     1991    AssertMsg(   !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
     1992              ||  HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
     1993               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     1994
     1995    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
     1996    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     1997    return rc;
     1998}
     1999#endif
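
The check on NstGstVmcbCache.fVmrunEmulatedInR0 above is an optimization: if VMRUN was just emulated in ring-0, the nested-guest VMCB already mirrors the guest-CPU context, so the control-register, segment and MSR state need not be copied in again (a later hunk clears the flag when going back to ring-3). A sketch of the idea with a hypothetical cache flag and simplified types:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical simplified state for the sketch. */
    typedef struct VMCB { uint64_t u64RIP, u64RSP, u64RAX; } VMCB;
    typedef struct CTX  { uint64_t rip, rsp, rax; } CTX;

    static void LoadNestedGuestState(VMCB *pVmcbNstGst, const CTX *pCtx, bool fVmrunEmulatedInR0)
    {
        /* If VMRUN was just emulated, the VMCB already matches the context;
           skip the redundant re-sync. */
        if (!fVmrunEmulatedInR0)
        {
            pVmcbNstGst->u64RIP = pCtx->rip;
            pVmcbNstGst->u64RSP = pCtx->rsp;
            pVmcbNstGst->u64RAX = pCtx->rax;
        }
    }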
     2000
     2001
     2002/**
     2003 * Loads the state shared between the host and guest or nested-guest into the
    18532004 * VMCB.
    18542005 *
     
    18652016
    18662017    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
     2018    {
     2019#ifdef VBOX_WITH_NESTED_HWVIRT
     2020        /* We use nested-guest CR0 unmodified, hence nothing to do here. */
     2021        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     2022            hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
     2023#else
    18672024        hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
     2025#endif
     2026        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
     2027    }
    18682028
    18692029    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
     2030    {
    18702031        hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
     2032        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
     2033    }
    18712034
    18722035    /* Unused on AMD-V. */
     
    18792042
    18802043/**
    1881  * Saves the entire guest state from the VMCB into the
    1882  * guest-CPU context. Currently there is no residual state left in the CPU that
    1883  * is not updated in the VMCB.
     2044 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU context.
     2045 *
     2046 * Currently there is no residual state left in the CPU that is not updated in the
     2047 * VMCB.
    18842048 *
    18852049 * @returns VBox status code.
     
    18882052 *                          out-of-sync. Make sure to update the required fields
    18892053 *                          before using them.
    1890  */
    1891 static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     2054 * @param   pVmcb           Pointer to the VM control block.
     2055 */
     2056static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PCSVMVMCB pVmcb)
    18922057{
    18932058    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    1894 
    1895     PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    18962059
    18972060    pMixedCtx->rip        = pVmcb->guest.u64RIP;
     
    19122075     */
    19132076    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
     2077
     2078#ifdef VBOX_WITH_NESTED_GUEST
     2079    /*
     2080     * The nested hypervisor might not be intercepting these control registers,
     2081     */
     2082    if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
     2083    {
     2084        pMixedCtx->cr3        = pVmcb->guest.u64CR3;
     2085        pMixedCtx->cr4        = pVmcb->guest.u64CR4;
     2086        pMixedCtx->cr0        = pVmcb->guest.u64CR0;
     2087    }
     2088#endif
    19142089
    19152090    /*
     
    20212196     * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
    20222197     */
    2023     if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
     2198#ifdef VBOX_WITH_NESTED_HWVIRT
     2199    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx) || !pVmcb->ctrl.NestedPaging.n.u1NestedPaging);
     2200#endif
     2201    if (   pVmcb->ctrl.NestedPaging.n.u1NestedPaging
    20242202        && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
    20252203    {
    20262204        CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
    2027         PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);
     2205        PGMUpdateCR3(pVCpu,    pVmcb->guest.u64CR3);
    20282206    }
    20292207}
     
    20592237    if (CPUMIsHyperDebugStateActive(pVCpu))
    20602238    {
    2061         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     2239        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    20622240        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
    20632241        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
     
    22382416        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    22392417    }
     2418
     2419#ifdef VBOX_WITH_NESTED_HWVIRT
     2420    pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
     2421#endif
    22402422
    22412423    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
     
    22702452 * @param   pVM         The cross context VM structure.
    22712453 * @param   pVCpu       The cross context virtual CPU structure.
     2454 * @param   pVmcb       Pointer to the VM control block.
    22722455 *
    22732456 * @remarks No-long-jump zone!!!
    22742457 */
    2275 static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu)
    2276 {
    2277     bool     fParavirtTsc;
    2278     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     2458static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PSVMVMCB pVmcb)
     2459{
     2460    bool fParavirtTsc;
    22792461    bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
    22802462    if (fCanUseRealTsc)
     
    24692651
    24702652/**
    2471  * Gets the guest's interrupt-shadow.
    2472  *
    2473  * @returns The guest's interrupt-shadow.
     2653 * Checks if the guest (or nested-guest) has an interrupt shadow active right
     2654 * now.
     2655 *
     2656 * @returns true if the interrupt shadow is active, false otherwise.
    24742657 * @param   pVCpu   The cross context virtual CPU structure.
    24752658 * @param   pCtx    Pointer to the guest-CPU context.
     
    24782661 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
    24792662 */
    2480 DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
     2663DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCPUMCTX pCtx)
    24812664{
    24822665    /*
     
    24842667     * inhibit interrupts or clear any existing interrupt-inhibition.
    24852668     */
    2486     uint32_t uIntrState = 0;
    24872669    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    24882670    {
     
    24942676             */
    24952677            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    2496         }
    2497         else
    2498             uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
    2499     }
    2500     return uIntrState;
     2678            return false;
     2679        }
     2680        return true;
     2681    }
     2682    return false;
    25012683}
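
For context on the renamed helper above: an interrupt shadow is the one-instruction window after STI or MOV SS during which the CPU must not deliver interrupts; the function now returns a plain bool and, as before, drops the inhibition once RIP has moved past the inhibiting instruction. A compact sketch of that check with hypothetical per-VCPU fields:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-VCPU state for the sketch. */
    typedef struct VCPUSTATE
    {
        bool     fInhibitInterrupts;   /* set when STI/MOV SS was just executed */
        uint64_t uRipInhibit;          /* RIP at which the inhibition was set   */
    } VCPUSTATE;

    static bool IsIntrShadowActive(VCPUSTATE *pVCpu, uint64_t uRip)
    {
        if (pVCpu->fInhibitInterrupts)
        {
            if (uRip != pVCpu->uRipInhibit)
            {
                /* Execution moved past the inhibiting instruction: clear the shadow. */
                pVCpu->fInhibitInterrupts = false;
                return false;
            }
            return true;
        }
        return false;
    }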
    25022684
     
    25782760}
    25792761
    2580 
    2581 /**
    2582  * Evaluates the event to be delivered to the guest and sets it as the pending
    2583  * event.
     2762#ifdef VBOX_WITH_NESTED_HWVIRT
     2763/**
     2764 * Evaluates the event to be delivered to the nested-guest and sets it as the
     2765 * pending event.
    25842766 *
    25852767 * @param   pVCpu       The cross context virtual CPU structure.
    25862768 * @param   pCtx        Pointer to the guest-CPU context.
    25872769 */
    2588 static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
    2589 {
     2770static void hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2771{
     2772    Log4Func(("\n"));
     2773
    25902774    Assert(!pVCpu->hm.s.Event.fPending);
    2591     Log4Func(("\n"));
    2592 
    2593     bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
    2594     bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
    2595     bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
    2596     PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    2597 
    2598     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    2599         APICUpdatePendingInterrupts(pVCpu);
     2775    Assert(pCtx->hwvirt.svm.fGif);
     2776
     2777    PSVMVMCB pVmcbNstGst  = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    26002778
    26012779    SVMEVENT Event;
    26022780    Event.u = 0;
    2603                                                               /** @todo SMI. SMIs take priority over NMIs. */
    2604     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts . */
    2605     {
     2781    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
     2782
     2783    /** @todo SMI. SMIs take priority over NMIs. */
     2784    /*
     2785     * Check if the nested-guest can receive NMIs.
     2786     * NMIs are higher priority than regular interrupts.
     2787     */
     2788    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
     2789    {
     2790        bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
    26062791        if (fBlockNmi)
    2607             hmR0SvmSetIretIntercept(pVmcb);
     2792            hmR0SvmSetIretIntercept(pVmcbNstGst);
    26082793        else if (fIntShadow)
    2609             hmR0SvmSetVirtIntrIntercept(pVmcb);
     2794            hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
    26102795        else
    26112796        {
     
    26172802
    26182803            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    2619             hmR0SvmSetIretIntercept(pVmcb);
     2804            hmR0SvmSetIretIntercept(pVmcbNstGst);
    26202805            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    2621         }
    2622     }
    2623     else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    2624              && !pVCpu->hm.s.fSingleInstruction)
    2625     {
    2626         /*
    2627          * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
    2628          * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
    2629          */
    2630         if (   !fBlockInt
     2806            return;
     2807        }
     2808    }
     2809
     2810    /*
     2811     * Check if the nested-guest can receive external interrupts (PIC/APIC).
     2812     */
     2813    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     2814        && !pVCpu->hm.s.fSingleInstruction)
     2815    {
     2816        /* Note: it is critical we call CPUMCanSvmNstGstTakePhysIntr -before- modifying the nested-guests's V_INTR_MASKING
     2817           bit, currently it gets modified in hmR0SvmLoadGuestApicStateNested. */
     2818        bool const fIntEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
     2819        if (    fIntEnabled
    26312820            && !fIntShadow)
    26322821        {
     
    26562845        }
    26572846        else
     2847            hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
     2848    }
     2849    /*
     2850     * Check if the nested-guest can receive virtual interrupts.
     2851     */
     2852    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
     2853    {
     2854        bool const fIntEnabled = CPUMCanSvmNstGstTakeVirtIntr(pCtx);
     2855        if (fIntEnabled)
     2856        {
     2857            uint8_t const u8Interrupt = CPUMGetSvmNstGstInterrupt(pCtx);
     2858            Log4(("Injecting virtual interrupt u8Interrupt=%#x\n", u8Interrupt));
     2859
     2860            Event.n.u1Valid  = 1;
     2861            Event.n.u8Vector = u8Interrupt;
     2862            Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
     2863
     2864            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     2865            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     2866        }
     2867    }
     2868}
     2869#endif
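
The nested-guest event evaluation above encodes the usual priority order: NMIs first (deferred via the IRET or virtual-interrupt intercept when NMIs are blocked or an interrupt shadow is active), then physical PIC/APIC interrupts gated by CPUMCanSvmNstGstTakePhysIntr, then the nested hypervisor's virtual interrupt gated by CPUMCanSvmNstGstTakeVirtIntr. A compact sketch of that cascade, with hypothetical flags standing in for the VMCPU force-flags and CPUM checks:

    #include <stdbool.h>

    typedef enum { EVT_NONE, EVT_NMI, EVT_PHYS_INTR, EVT_VIRT_INTR } EVTKIND;

    /* Hypothetical priority picker mirroring the order of the checks above. */
    static EVTKIND PickPendingEvent(bool fNmiPending, bool fNmiBlocked, bool fIntShadow,
                                    bool fPhysIntrPending, bool fPhysIntrEnabled,
                                    bool fVirtIntrPending, bool fVirtIntrEnabled)
    {
        if (fNmiPending && !fNmiBlocked && !fIntShadow)
            return EVT_NMI;                      /* NMIs take priority */
        if (fPhysIntrPending && fPhysIntrEnabled && !fIntShadow)
            return EVT_PHYS_INTR;                /* PIC/APIC interrupts next */
        if (fVirtIntrPending && fVirtIntrEnabled)
            return EVT_VIRT_INTR;                /* nested hypervisor's virtual interrupt last */
        return EVT_NONE;
    }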
     2870
     2871/**
     2872 * Evaluates the event to be delivered to the guest and sets it as the pending
     2873 * event.
     2874 *
     2875 * @param   pVCpu       The cross context virtual CPU structure.
     2876 * @param   pCtx        Pointer to the guest-CPU context.
     2877 */
     2878static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
     2879{
     2880    Assert(!pVCpu->hm.s.Event.fPending);
     2881    Log4Func(("\n"));
     2882
     2883    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
     2884    bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
     2885    bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
     2886    PSVMVMCB pVmcb        = pVCpu->hm.s.svm.pVmcb;
     2887
     2888    SVMEVENT Event;
     2889    Event.u = 0;
     2890                                                              /** @todo SMI. SMIs take priority over NMIs. */
     2891    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
     2892    {
     2893        if (fBlockNmi)
     2894            hmR0SvmSetIretIntercept(pVmcb);
     2895        else if (fIntShadow)
     2896            hmR0SvmSetVirtIntrIntercept(pVmcb);
     2897        else
     2898        {
     2899            Log4(("Pending NMI\n"));
     2900
     2901            Event.n.u1Valid  = 1;
     2902            Event.n.u8Vector = X86_XCPT_NMI;
     2903            Event.n.u3Type   = SVM_EVENT_NMI;
     2904
     2905            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     2906            hmR0SvmSetIretIntercept(pVmcb);
     2907            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
     2908        }
     2909    }
     2910    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     2911             && !pVCpu->hm.s.fSingleInstruction)
     2912    {
     2913        /*
     2914         * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
     2915         * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
     2916         */
     2917        if (   !fBlockInt
     2918            && !fIntShadow)
     2919        {
     2920            uint8_t u8Interrupt;
     2921            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
     2922            if (RT_SUCCESS(rc))
     2923            {
     2924                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
     2925
     2926                Event.n.u1Valid  = 1;
     2927                Event.n.u8Vector = u8Interrupt;
     2928                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
     2929
     2930                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     2931            }
     2932            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
     2933            {
     2934                /*
     2935                 * AMD-V has no TPR thresholding feature. We just avoid posting the interrupt.
     2936                 * We just avoid delivering the TPR-masked interrupt here. TPR will be updated
     2937                 * always via hmR0SvmLoadGuestState() -> hmR0SvmLoadGuestApicState().
     2938                 */
     2939                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
     2940            }
     2941            else
     2942                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
     2943        }
     2944        else
    26582945            hmR0SvmSetVirtIntrIntercept(pVmcb);
    26592946    }
     
    26672954 * @param   pVCpu       The cross context virtual CPU structure.
    26682955 * @param   pCtx        Pointer to the guest-CPU context.
    2669  */
    2670 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
     2956 * @param   pVmcb       Pointer to the VM control block.
     2957 */
     2958static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
    26712959{
    26722960    Assert(!TRPMHasTrap(pVCpu));
    26732961    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    26742962
    2675     bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
     2963    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
    26762964    bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
    2677     PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    26782965
    26792966    if (pVCpu->hm.s.Event.fPending)                                /* First, inject any pending HM events. */
     
    26812968        SVMEVENT Event;
    26822969        Event.u = pVCpu->hm.s.Event.u64IntInfo;
     2970
    26832971        Assert(Event.n.u1Valid);
    2684 #ifdef VBOX_STRICT
    26852972        if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
    26862973        {
     
    26902977        else if (Event.n.u3Type == SVM_EVENT_NMI)
    26912978            Assert(!fIntShadow);
    2692 #endif
    2693 
    2694 #ifndef RT_OS_WINDOWS
    2695         /* Temporary test for returning guru, later make this function return void as before. */
    2696         if (   Event.n.u3Type == SVM_EVENT_EXCEPTION
    2697             && Event.n.u8Vector == X86_XCPT_PF)
    2698         {
    2699             AssertRelease(pCtx->cr2 == pVCpu->hm.s.Event.GCPtrFaultAddress);
    2700         }
    2701 #endif
    2702 
    2703         Log4(("Injecting pending HM event.\n"));
     2979        NOREF(fBlockInt);
     2980
     2981        Log4(("Injecting pending HM event\n"));
    27042982        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
    27052983        pVCpu->hm.s.Event.fPending = false;
     
    27152993    /* Update the guest interrupt shadow in the VMCB. */
    27162994    pVmcb->ctrl.u64IntShadow = !!fIntShadow;
    2717     NOREF(fBlockInt);
    27182995}
    27192996
     
    27333010    NOREF(pCtx);
    27343011    HMSVM_ASSERT_PREEMPT_SAFE();
    2735     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     3012    PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    27363013
    27373014    if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
     
    28893166    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    28903167
     3168    /* Update pending interrupts into the APIC's IRR. */
     3169    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
     3170        APICUpdatePendingInterrupts(pVCpu);
     3171
    28913172    if (   VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
    28923173                            ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
     
    29433224
    29443225
    2945 /**
    2946  * Does the preparations before executing guest code in AMD-V.
    2947  *
    2948  * This may cause longjmps to ring-3 and may even result in rescheduling to the
    2949  * recompiler. We must be cautious what we do here regarding committing
    2950  * guest-state information into the VMCB assuming we assuredly execute the guest
    2951  * in AMD-V. If we fall back to the recompiler after updating the VMCB and
    2952  * clearing the common-state (TRPM/forceflags), we must undo those changes so
    2953  * that the recompiler can (and should) use them when it resumes guest
    2954  * execution. Otherwise such operations must be done when we can no longer
    2955  * exit to ring-3.
     3226#ifdef VBOX_WITH_NESTED_HWVIRT
     3227/**
     3228 * Does the preparations before executing nested-guest code in AMD-V.
    29563229 *
    29573230 * @returns VBox status code (informational status codes included).
     
    29633236 * @param   pCtx            Pointer to the guest-CPU context.
    29643237 * @param   pSvmTransient   Pointer to the SVM transient structure.
    2965  */
    2966 static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     3238 *
     3239 * @remarks Same caveats regarding longjumps as hmR0SvmPreRunGuest applies.
     3240 * @sa      hmR0SvmPreRunGuest.
     3241 */
     3242static int hmR0SvmPreRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    29673243{
    29683244    HMSVM_ASSERT_PREEMPT_SAFE();
    29693245
    2970 #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
    2971     /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
     3246#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    29723247    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    29733248    {
     
    29853260        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
    29863261    else if (!pVCpu->hm.s.Event.fPending)
    2987         hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
     3262        hmR0SvmEvaluatePendingEventNested(pVCpu, pCtx);
    29883263
    29893264    /*
     
    29923267     * NB: If we could continue a task switch exit we wouldn't need to do this.
    29933268     */
    2994     if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
    2995         if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
    2996             return VINF_EM_RAW_INJECT_TRPM_EVENT;
    2997 
    2998 #ifdef HMSVM_SYNC_FULL_GUEST_STATE
    2999     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    3000 #endif
    3001 
    3002     /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
    3003     rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
     3269    if (RT_UNLIKELY(   !pVM->hm.s.svm.u32Features
     3270                    &&  pVCpu->hm.s.Event.fPending
     3271                    &&  SVM_EVENT_GET_TYPE(pVCpu->hm.s.Event.u64IntInfo) == SVM_EVENT_NMI))
     3272    {
     3273        return VINF_EM_RAW_INJECT_TRPM_EVENT;
     3274    }
     3275
     3276    /*
     3277     * Load the nested-guest state. We can optimize this later to be avoided when VMRUN is
     3278     * just emulated in hmR0SvmExecVmrun since the VMCB is already setup by the nested-hypervisor,
     3279     * We currently do this because we may pre-maturely return to ring-3 before executing the
     3280     * nested-guest and doing it here is simpler.
     3281     */
     3282    rc = hmR0SvmLoadGuestStateNested(pVCpu, pCtx);
    30043283    AssertRCReturn(rc, rc);
     3284    /** @todo Get new STAM counter for this? */
    30053285    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    3006 
    3007     /*
    3008      * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
    3009      * so we can update it on the way back if the guest changed the TPR.
    3010      */
    3011     if (pVCpu->hm.s.svm.fSyncVTpr)
    3012     {
    3013         if (pVM->hm.s.fTPRPatchingActive)
    3014             pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
    3015         else
    3016         {
    3017             PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    3018             pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
    3019         }
    3020     }
    30213286
    30223287    /*
     
    30773342    return VINF_SUCCESS;
    30783343}
    3079 
    3080 
    3081 /**
    3082  * Prepares to run guest code in AMD-V and we've committed to doing so. This
    3083  * means there is no backing out to ring-3 or anywhere else at this
    3084  * point.
     3344#endif
     3345
     3346
     3347/**
     3348 * Does the preparations before executing guest code in AMD-V.
     3349 *
     3350 * This may cause longjmps to ring-3 and may even result in rescheduling to the
     3351 * recompiler. We must be cautious what we do here regarding committing
     3352 * guest-state information into the VMCB assuming we assuredly execute the guest
     3353 * in AMD-V. If we fall back to the recompiler after updating the VMCB and
     3354 * clearing the common-state (TRPM/forceflags), we must undo those changes so
     3355 * that the recompiler can (and should) use them when it resumes guest
     3356 * execution. Otherwise such operations must be done when we can no longer
     3357 * exit to ring-3.
     3358 *
     3359 * @returns VBox status code (informational status codes included).
     3360 * @retval VINF_SUCCESS if we can proceed with running the guest.
     3361 * @retval VINF_* scheduling changes, we have to go back to ring-3.
    30853362 *
    30863363 * @param   pVM             The cross context VM structure.
     
    30883365 * @param   pCtx            Pointer to the guest-CPU context.
    30893366 * @param   pSvmTransient   Pointer to the SVM transient structure.
     3367 */
     3368static int hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     3369{
     3370    HMSVM_ASSERT_PREEMPT_SAFE();
     3371    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     3372
     3373#if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
     3374
     3375    /* IEM only for executing nested guest, we shouldn't get here. */
     3376    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     3377    {
     3378        Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
     3379        return VINF_EM_RESCHEDULE_REM;
     3380    }
     3381#endif
     3382
     3383    /* Check force flag actions that might require us to go back to ring-3. */
     3384    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     3385    if (rc != VINF_SUCCESS)
     3386        return rc;
     3387
     3388    if (TRPMHasTrap(pVCpu))
     3389        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
     3390    else if (!pVCpu->hm.s.Event.fPending)
     3391        hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);
     3392
     3393    /*
     3394     * On the oldest AMD-V systems, we may not get enough information to reinject an NMI.
     3395     * Just do it in software, see @bugref{8411}.
     3396     * NB: If we could continue a task switch exit we wouldn't need to do this.
     3397     */
     3398    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending && (((pVCpu->hm.s.Event.u64IntInfo >> 8) & 7) == SVM_EVENT_NMI)))
     3399        if (RT_UNLIKELY(!pVM->hm.s.svm.u32Features))
     3400            return VINF_EM_RAW_INJECT_TRPM_EVENT;
     3401
     3402#ifdef HMSVM_SYNC_FULL_GUEST_STATE
     3403    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     3404#endif
     3405
     3406    /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
     3407    rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
     3408    AssertRCReturn(rc, rc);
     3409    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
     3410
     3411    /*
     3412     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
     3413     * so we can update it on the way back if the guest changed the TPR.
     3414     */
     3415    if (pVCpu->hm.s.svm.fSyncVTpr)
     3416    {
     3417        if (pVM->hm.s.fTPRPatchingActive)
     3418            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
     3419        else
     3420        {
     3421            PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     3422            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
     3423        }
     3424    }
     3425
     3426    /*
     3427     * No longjmps to ring-3 from this point on!!!
     3428     * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
     3429     * This also disables flushing of the R0-logger instance (if any).
     3430     */
     3431    VMMRZCallRing3Disable(pVCpu);
     3432
     3433    /*
     3434     * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
     3435     * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
     3436     *
     3437     * We need to check for force-flags that could've possible been altered since we last checked them (e.g.
     3438     * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
     3439     *
     3440     * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
     3441     * executing guest code.
     3442     */
     3443    pSvmTransient->fEFlags = ASMIntDisableFlags();
     3444    if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
     3445        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     3446    {
     3447        ASMSetFlags(pSvmTransient->fEFlags);
     3448        VMMRZCallRing3Enable(pVCpu);
     3449        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
     3450        return VINF_EM_RAW_TO_R3;
     3451    }
     3452    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     3453    {
     3454        ASMSetFlags(pSvmTransient->fEFlags);
     3455        VMMRZCallRing3Enable(pVCpu);
     3456        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
     3457        return VINF_EM_RAW_INTERRUPT;
     3458    }
     3459
     3460    /*
     3461     * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
     3462     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
     3463     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
     3464     *
     3465     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
     3466     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
     3467     */
     3468    if (pVCpu->hm.s.Event.fPending)
     3469    {
     3470        SVMEVENT Event;
     3471        Event.u = pVCpu->hm.s.Event.u64IntInfo;
     3472        if (    Event.n.u1Valid
     3473            &&  Event.n.u3Type == SVM_EVENT_NMI
     3474            &&  Event.n.u8Vector == X86_XCPT_NMI
     3475            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     3476        {
     3477            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3478        }
     3479    }
     3480
     3481    return VINF_SUCCESS;
     3482}
     3483
     3484
     3485#ifdef VBOX_WITH_NESTED_HWVIRT
     3486/**
     3487 * Prepares to run nested-guest code in AMD-V and we've committed to doing so. This
     3488 * means there is no backing out to ring-3 or anywhere else at this point.
     3489 *
     3490 * @param   pVM             The cross context VM structure.
     3491 * @param   pVCpu           The cross context virtual CPU structure.
     3492 * @param   pCtx            Pointer to the guest-CPU context.
     3493 * @param   pSvmTransient   Pointer to the SVM transient structure.
    30903494 *
    30913495 * @remarks Called with preemption disabled.
    30923496 * @remarks No-long-jump zone!!!
    30933497 */
    3094 static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     3498static void hmR0SvmPreRunGuestCommittedNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    30953499{
    30963500    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    31013505    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
    31023506
    3103     hmR0SvmInjectPendingEvent(pVCpu, pCtx);
     3507    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     3508    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);
    31043509
    31053510    if (   pVCpu->hm.s.fPreloadGuestFpu
     
    31103515    }
    31113516
    3112     /* Load the state shared between host and guest (FPU, debug). */
    3113     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     3517    /* Load the state shared between host and nested-guest (FPU, debug). */
    31143518    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
    3115         hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
     3519        hmR0SvmLoadSharedState(pVCpu, pVmcbNstGst, pCtx);
     3520
    31163521    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);             /* Preemption might set this, nothing to do on AMD-V. */
    31173522    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     
    31223527        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
    31233528    {
    3124         hmR0SvmUpdateTscOffsetting(pVM, pVCpu);
     3529        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcbNstGst);
    31253530        pSvmTransient->fUpdateTscOffsetting = false;
    31263531    }
     
     31283533    /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
    31293534    if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
    3130         pVmcb->ctrl.u64VmcbCleanBits = 0;
     3535        pVmcbNstGst->ctrl.u64VmcbCleanBits = 0;
    31313536
    31323537    /* Store status of the shared guest-host state at the time of VMRUN. */
     
    31453550    pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
    31463551
    3147     /* Flush the appropriate tagged-TLB entries. */
      3552    /* The TLB flushing would've already been set up by the nested-hypervisor. */
    31483553    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
    3149     hmR0SvmFlushTaggedTlb(pVCpu);
    31503554    Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
    31513555
     
    31613565     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
    31623566     */
     3567    uint8_t *pbMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    31633568    if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    3164         && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    3165     {
    3166         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     3569        && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     3570    {
     3571        hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    31673572        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    31683573        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
     
    31733578    else
    31743579    {
    3175         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     3580        hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     3581        pSvmTransient->fRestoreTscAuxMsr = false;
     3582    }
     3583
     3584    /*
      3585     * If VMCB Clean bits aren't supported by the CPU or aren't exposed to the guest,
      3586     * mark all state-bits as dirty, indicating to the CPU to re-load everything from the VMCB.
     3587     */
     3588    if (   !(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
     3589        || !(pVM->cpum.ro.GuestFeatures.fSvmVmcbClean))
     3590        pVmcbNstGst->ctrl.u64VmcbCleanBits = 0;
     3591}
     3592#endif
     3593
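/*
 * A rough sketch of how an entry in the 8 KB SVM MSR permission bitmap is located,
 * which is what the hmR0SvmSetMsrPermission calls above rely on for TSC_AUX. Each MSR
 * gets two consecutive bits (read intercept, then write intercept); a set bit causes a
 * #VMEXIT. The helper name and signature below are illustrative only, not VirtualBox code.
 */
static bool svmSketchMsrpmBitOffset(uint32_t uMsr, uint32_t *poffBit)
{
    if (uMsr <= UINT32_C(0x00001fff))                            /* bytes 0000h-07FFh */
        *poffBit = uMsr << 1;
    else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))    /* bytes 0800h-0FFFh */
        *poffBit = (UINT32_C(0x0800) << 3) + ((uMsr - UINT32_C(0xc0000000)) << 1);
    else if (uMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))    /* bytes 1000h-17FFh */
        *poffBit = (UINT32_C(0x1000) << 3) + ((uMsr - UINT32_C(0xc0010000)) << 1);
    else
        return false;                 /* MSRs outside these ranges are always intercepted. */
    return true;
}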
     3594
     3595/**
     3596 * Prepares to run guest code in AMD-V and we've committed to doing so. This
     3597 * means there is no backing out to ring-3 or anywhere else at this
     3598 * point.
     3599 *
     3600 * @param   pVM             The cross context VM structure.
     3601 * @param   pVCpu           The cross context virtual CPU structure.
     3602 * @param   pCtx            Pointer to the guest-CPU context.
     3603 * @param   pSvmTransient   Pointer to the SVM transient structure.
     3604 *
     3605 * @remarks Called with preemption disabled.
     3606 * @remarks No-long-jump zone!!!
     3607 */
     3608static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     3609{
     3610    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     3611    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     3612    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     3613
     3614    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     3615    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
     3616
     3617    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     3618    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
     3619
     3620    if (   pVCpu->hm.s.fPreloadGuestFpu
     3621        && !CPUMIsGuestFPUStateActive(pVCpu))
     3622    {
     3623        CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */
     3624        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     3625    }
     3626
     3627    /* Load the state shared between host and guest (FPU, debug). */
     3628    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
     3629        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
     3630
     3631    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);             /* Preemption might set this, nothing to do on AMD-V. */
     3632    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     3633
     3634    /* Setup TSC offsetting. */
     3635    RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
     3636    if (   pSvmTransient->fUpdateTscOffsetting
     3637        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     3638    {
     3639        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcb);
     3640        pSvmTransient->fUpdateTscOffsetting = false;
     3641    }
     3642
      3643    /* If we're migrating CPUs, mark the VMCB Clean bits as dirty. */
     3644    if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
     3645        pVmcb->ctrl.u64VmcbCleanBits = 0;
     3646
     3647    /* Store status of the shared guest-host state at the time of VMRUN. */
     3648#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     3649    if (CPUMIsGuestInLongModeEx(pCtx))
     3650    {
     3651        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
     3652        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
     3653    }
     3654    else
     3655#endif
     3656    {
     3657        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
     3658        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
     3659    }
     3660    pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
     3661
     3662    /* Flush the appropriate tagged-TLB entries. */
     3663    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     3664    hmR0SvmFlushTaggedTlb(pVCpu);
     3665    Assert(hmR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
     3666
     3667    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
     3668
     3669    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
     3670                                                                   to start executing. */
     3671
     3672    /*
      3673     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
      3674     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
     3675     *
     3676     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
     3677     */
     3678    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     3679    if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
     3680        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
     3681    {
     3682        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     3683        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
     3684        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
     3685        if (u64GuestTscAux != pVCpu->hm.s.u64HostTscAux)
     3686            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
     3687        pSvmTransient->fRestoreTscAuxMsr = true;
     3688    }
     3689    else
     3690    {
     3691        hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
    31763692        pSvmTransient->fRestoreTscAuxMsr = false;
    31773693    }
     
    32093725
    32103726
     3727#ifdef VBOX_WITH_NESTED_HWVIRT
     3728/**
     3729 * Performs some essential restoration of state after running nested-guest code in
     3730 * AMD-V.
     3731 *
     3732 * @param   pVM             The cross context VM structure.
     3733 * @param   pVCpu           The cross context virtual CPU structure.
      3734 * @param   pMixedCtx       Pointer to the nested-guest-CPU context. The data may be
     3735 *                          out-of-sync. Make sure to update the required fields
     3736 *                          before using them.
     3737 * @param   pSvmTransient   Pointer to the SVM transient structure.
     3738 * @param   rcVMRun         Return code of VMRUN.
     3739 *
     3740 * @remarks Called with interrupts disabled.
     3741 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
     3742 *          unconditionally when it is safe to do so.
     3743 */
     3744static void hmR0SvmPostRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
     3745{
     3746    RT_NOREF(pVM);
     3747    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     3748
     3749    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
     3750    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
     3751
     3752    /* TSC read must be done early for maximum accuracy. */
     3753    PSVMVMCB     pVmcbNstGst     = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     3754    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     3755    if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
     3756        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset);
     3757
     3758    if (pSvmTransient->fRestoreTscAuxMsr)
     3759    {
     3760        uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
     3761        CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
     3762        if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
     3763            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
     3764    }
     3765
     3766    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
     3767    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
     3768    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     3769
     3770    Assert(!(ASMGetFlags() & X86_EFL_IF));
     3771    ASMSetFlags(pSvmTransient->fEFlags);                        /* Enable interrupts. */
     3772    VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */
     3773
     3774    /* Mark the VMCB-state cache as unmodified by VMM. */
     3775    pVmcbNstGstCtrl->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;
     3776
     3777    /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
     3778    if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
     3779    {
     3780        Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
     3781        return;
     3782    }
     3783
     3784    pSvmTransient->u64ExitCode  = pVmcbNstGstCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
     3785    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbNstGstCtrl->u64ExitCode);/* Update the #VMEXIT history array. */
     3786    pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
     3787    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */
     3788
     3789    Assert(!pVCpu->hm.s.svm.fSyncVTpr);
     3790    hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);       /* Save the nested-guest state from the VMCB to the
     3791                                                                   guest-CPU context. */
     3792}
     3793#endif
     3794
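/*
 * Why TMCpuTickSetLastSeen() above adds the VMCB TSC offset: with SVM TSC offsetting,
 * RDTSC/RDTSCP executed by the (nested-)guest returns the host TSC plus
 * VMCB.ctrl.u64TSCOffset, so the guest-visible value has to be reconstructed the same
 * way. Minimal sketch; the helper name is made up for illustration.
 */
static uint64_t svmSketchGuestTsc(uint64_t uHostTsc, uint64_t u64TscOffset)
{
    /* The offset is added with wrap-around semantics; it may be "negative" when
       interpreted as a signed quantity. */
    return uHostTsc + u64TscOffset;
}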
    32113795/**
    32123796 * Performs some essential restoration of state after running guest code in
     
    32323816    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
    32333817
    3234     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
      3818    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    32353819    pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */
    32363820
     
    32673851    pSvmTransient->fVectoringPF = false;                        /* Vectoring page-fault needs to be determined later. */
    32683852
    3269     hmR0SvmSaveGuestState(pVCpu, pMixedCtx);                    /* Save the guest state from the VMCB to the guest-CPU context. */
     3853    hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcb);             /* Save the guest state from the VMCB to the guest-CPU context. */
    32703854
    32713855    if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
     
    32993883 * @param   pVCpu       The cross context virtual CPU structure.
    33003884 * @param   pCtx        Pointer to the guest-CPU context.
    3301  */
    3302 static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    3303 {
     3885 * @param   pcLoops     Pointer to the number of executed loops.
     3886 */
     3887static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     3888{
     3889    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
     3890    Assert(pcLoops);
     3891    Assert(*pcLoops <= cMaxResumeLoops);
     3892
    33043893    SVMTRANSIENT SvmTransient;
    33053894    SvmTransient.fUpdateTscOffsetting = true;
    3306     uint32_t cLoops = 0;
    3307     int      rc     = VERR_INTERNAL_ERROR_5;
    3308 
    3309     for (;; cLoops++)
     3895
     3896    int rc = VERR_INTERNAL_ERROR_5;
     3897    for (;;)
    33103898    {
    33113899        Assert(!HMR0SuspendPending());
     
    33443932        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    33453933        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    3346         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb);
     3934        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
    33473935        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
    33483936        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    33493937        if (rc != VINF_SUCCESS)
    33503938            break;
    3351         if (cLoops > pVM->hm.s.cMaxResumeLoops)
     3939        if (++(*pcLoops) >= cMaxResumeLoops)
    33523940        {
    33533941            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
    33693957 * @param   pVCpu       The cross context virtual CPU structure.
    33703958 * @param   pCtx        Pointer to the guest-CPU context.
    3371  */
    3372 static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    3373 {
     3959 * @param   pcLoops     Pointer to the number of executed loops.
     3960 */
     3961static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     3962{
     3963    uint32_t const cMaxResumeLoops = pVM->hm.s.cMaxResumeLoops;
     3964    Assert(pcLoops);
     3965    Assert(*pcLoops <= cMaxResumeLoops);
     3966
    33743967    SVMTRANSIENT SvmTransient;
    33753968    SvmTransient.fUpdateTscOffsetting = true;
    3376     uint32_t cLoops  = 0;
    3377     int      rc      = VERR_INTERNAL_ERROR_5;
     3969
    33783970    uint16_t uCsStart  = pCtx->cs.Sel;
    33793971    uint64_t uRipStart = pCtx->rip;
    33803972
    3381     for (;; cLoops++)
     3973    int rc = VERR_INTERNAL_ERROR_5;
     3974    for (;;)
    33823975    {
    33833976        Assert(!HMR0SuspendPending());
    33843977        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    33853978                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
    3386                   (unsigned)RTMpCpuId(), cLoops));
     3979                  (unsigned)RTMpCpuId(), *pcLoops));
    33873980
    33883981        /* Preparatory work for running guest code, this may force us to return
     
    34224015        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
    34234016        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    3424         VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb);
     4017        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
    34254018        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
    34264019        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    34274020        if (rc != VINF_SUCCESS)
    34284021            break;
    3429         if (cLoops > pVM->hm.s.cMaxResumeLoops)
     4022        if (++(*pcLoops) >= cMaxResumeLoops)
    34304023        {
    34314024            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
    34604053}
    34614054
     4055#ifdef VBOX_WITH_NESTED_HWVIRT
     4056/**
     4057 * Runs the nested-guest code using AMD-V.
     4058 *
     4059 * @returns VBox status code.
     4060 * @param   pVM         The cross context VM structure.
     4061 * @param   pVCpu       The cross context virtual CPU structure.
     4062 * @param   pCtx        Pointer to the guest-CPU context.
     4063 * @param   pcLoops     Pointer to the number of executed loops. If we're switching
     4064 *                      from the guest-code execution loop to this nested-guest
     4065 *                      execution loop pass the remainder value, else pass 0.
     4066 */
     4067static int hmR0SvmRunGuestCodeNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t *pcLoops)
     4068{
     4069    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     4070    Assert(pcLoops);
     4071    Assert(*pcLoops <= pVM->hm.s.cMaxResumeLoops);
     4072
     4073    SVMTRANSIENT SvmTransient;
     4074    SvmTransient.fUpdateTscOffsetting = true;
     4075
     4076    int rc = VERR_INTERNAL_ERROR_4;
     4077    for (;;)
     4078    {
     4079        Assert(!HMR0SuspendPending());
     4080        HMSVM_ASSERT_CPU_SAFE();
     4081
     4082        /* Preparatory work for running nested-guest code, this may force us to return
     4083           to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
     4084        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     4085        rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
     4086        if (rc != VINF_SUCCESS)
     4087            break;
     4088
     4089        /*
     4090         * No longjmps to ring-3 from this point on!!!
     4091         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
     4092         * This also disables flushing of the R0-logger instance (if any).
     4093         */
     4094        hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
     4095        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
     4096
     4097        /* Restore any residual host-state and save any bits shared between host
     4098           and guest into the guest-CPU state.  Re-enables interrupts! */
     4099        hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);
     4100
     4101        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
     4102                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
     4103        {
     4104            if (rc == VINF_SUCCESS)
     4105                rc = VERR_SVM_INVALID_GUEST_STATE;
     4106            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     4107            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
     4108            break;
     4109        }
     4110
     4111        /* Handle the #VMEXIT. */
     4112        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
     4113        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
     4114        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hm.s.svm.pVmcb);
     4115        rc = hmR0SvmHandleExitNested(pVCpu, pCtx, &SvmTransient);
     4116        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     4117        if (rc != VINF_SUCCESS)
     4118            break;
     4119        if (++(*pcLoops) >= pVM->hm.s.cMaxResumeLoops)
     4120        {
     4121            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     4122            rc = VINF_EM_RAW_INTERRUPT;
     4123            break;
     4124        }
     4125
     4126        /** @todo handle single-stepping   */
     4127    }
     4128
     4129    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     4130    return rc;
     4131}
     4132#endif
     4133
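/*
 * The normal, single-stepping and nested-guest loops above now share one counter
 * through pcLoops, so cMaxResumeLoops caps the combined number of iterations even when
 * execution switches from the normal loop into the nested-guest loop. A reduced sketch
 * of that pattern; the names and the VINF_SVM_VMRUN stand-in are illustrative only.
 */
static int svmSketchRunLoops(int (*pfnRunNormal)(uint32_t *pcLoops), int (*pfnRunNested)(uint32_t *pcLoops))
{
    uint32_t cLoops = 0;                  /* both loops bump this until cMaxResumeLoops is hit */
    int rc = pfnRunNormal(&cLoops);       /* may return a "switch to nested-guest" status */
    if (rc == 1 /* stand-in for VINF_SVM_VMRUN */)
        rc = pfnRunNested(&cLoops);       /* keeps counting from where the first loop stopped */
    return rc;
}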
    34624134
    34634135/**
     
    34754147    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
    34764148
    3477     int rc;
    3478     if (!pVCpu->hm.s.fSingleInstruction)
    3479         rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx);
     4149    uint32_t cLoops = 0;
     4150    int      rc;
     4151#ifdef VBOX_WITH_NESTED_HWVIRT
     4152    if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     4153#endif
     4154    {
     4155        if (!pVCpu->hm.s.fSingleInstruction)
     4156            rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, &cLoops);
     4157        else
     4158            rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, &cLoops);
     4159    }
     4160#ifdef VBOX_WITH_NESTED_HWVIRT
    34804161    else
    3481         rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx);
     4162    {
     4163        rc = VINF_SVM_VMRUN;
     4164    }
     4165
     4166    /* Re-check the nested-guest condition here as we may be transitioning from the normal
     4167       execution loop into the nested-guest. */
     4168    if (rc == VINF_SVM_VMRUN)
     4169        rc = hmR0SvmRunGuestCodeNested(pVM, pVCpu, pCtx, &cLoops);
     4170#endif
    34824171
    34834172    if (rc == VERR_EM_INTERPRETER)
     
    34934182
    34944183
    3495 /**
    3496  * Handles a \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
     4184#ifdef VBOX_WITH_NESTED_HWVIRT
     4185/**
     4186 * Handles a nested-guest \#VMEXIT (for all EXITCODE values except
     4187 * SVM_EXIT_INVALID).
    34974188 *
    34984189 * @returns VBox status code (informational status codes included).
     
    35014192 * @param   pSvmTransient   Pointer to the SVM transient structure.
    35024193 */
    3503 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4194static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    35044195{
    35054196    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
    35064197    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    35074198
    3508     /*
    3509      * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
    3510      * normal workloads (for some definition of "normal").
    3511      */
    3512     uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
     4199    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     4200    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    35134201    switch (pSvmTransient->u64ExitCode)
    35144202    {
    3515         case SVM_EXIT_NPF:
    3516             return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
     4203        //case SVM_EXIT_NPF:
     4204        {
     4205            /** @todo. */
     4206            break;
     4207        }
    35174208
    35184209        case SVM_EXIT_IOIO:
     4210        {
     4211            /*
     4212             * Figure out if the IO port access is intercepted by the nested-guest. If not,
     4213             * we pass it to the outer guest.
     4214             */
     4215            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_IOIO_PROT)
     4216            {
     4217                void *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
     4218                SVMIOIOEXITINFO IoExitInfo;
     4219                IoExitInfo.u = (uint32_t)pVmcbNstGst->ctrl.u64ExitInfo1;
     4220                bool const fIntercept = HMSvmIsIOInterceptActive(pvIoBitmap, IoExitInfo.n.u16Port,
     4221                                                                 (SVMIOIOTYPE)IoExitInfo.n.u1Type,
     4222                                                                 (IoExitInfo.u >> SVM_IOIO_OP_SIZE_SHIFT) & 7,
     4223                                                                 (IoExitInfo.u >> SVM_IOIO_ADDR_SIZE_SHIFT) << 4,
     4224                                                                 IoExitInfo.n.u3SEG, IoExitInfo.n.u1REP, IoExitInfo.n.u1STR,
     4225                                                                 NULL /* pIoExitInfo */);
     4226                if (fIntercept)
     4227                    return hmR0SvmExecVmexit(pVCpu, pCtx);
     4228            }
    35194229            return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
     4230        }
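        /*
         * For orientation (sketch, not part of the changeset text): the SVM IOPM is a
         * 12 KB bitmap with one intercept bit per I/O port; a multi-byte access is
         * intercepted if any bit covering the accessed ports is set, which is essentially
         * what HMSvmIsIOInterceptActive evaluates above using the port, access type and
         * sizes decoded from EXITINFO1.
         */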
    35204231
    35214232        case SVM_EXIT_RDTSC:
     4233        {
    35224234            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
     4235        }
    35234236
    35244237        case SVM_EXIT_RDTSCP:
     
    36864399                     *        HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY here! */
    36874400
    3688                     PSVMVMCB pVmcb   = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     4401                    PSVMVMCB pVmcb   = pVCpu->hm.s.svm.pVmcb;
     4402                    SVMEVENT Event;
     4403                    Event.u          = 0;
     4404                    Event.n.u1Valid  = 1;
     4405                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     4406                    Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
     4407
     4408                    switch (Event.n.u8Vector)
     4409                    {
     4410                        case X86_XCPT_DE:
     4411                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
     4412                            break;
     4413
     4414                        case X86_XCPT_NP:
     4415                            Event.n.u1ErrorCodeValid    = 1;
     4416                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
     4417                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
     4418                            break;
     4419
     4420                        case X86_XCPT_SS:
     4421                            Event.n.u1ErrorCodeValid    = 1;
     4422                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
     4423                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
     4424                            break;
     4425
     4426                        case X86_XCPT_GP:
     4427                            Event.n.u1ErrorCodeValid    = 1;
     4428                            Event.n.u32ErrorCode        = pVmcb->ctrl.u64ExitInfo1;
     4429                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
     4430                            break;
     4431
     4432                        default:
     4433                            AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit caused by exception %#x\n", Event.n.u8Vector));
     4434                            pVCpu->hm.s.u32HMError = Event.n.u8Vector;
     4435                            return VERR_SVM_UNEXPECTED_XCPT_EXIT;
     4436                    }
     4437
     4438                    Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
     4439                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     4440                    return VINF_SUCCESS;
     4441                }
     4442#endif  /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
     4443
     4444                default:
     4445                {
     4446                    AssertMsgFailed(("hmR0SvmHandleExit: Unknown exit code %#x\n", pSvmTransient->u64ExitCode));
     4447                    pVCpu->hm.s.u32HMError = pSvmTransient->u64ExitCode;
     4448                    return VERR_SVM_UNKNOWN_EXIT;
     4449                }
     4450            }
     4451        }
     4452    }
     4453    /* not reached */
     4454}
     4455#endif
     4456
     4457
     4458/**
     4459 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
     4460 *
     4461 * @returns VBox status code (informational status codes included).
     4462 * @param   pVCpu           The cross context virtual CPU structure.
     4463 * @param   pCtx            Pointer to the guest-CPU context.
     4464 * @param   pSvmTransient   Pointer to the SVM transient structure.
     4465 */
     4466static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4467{
     4468    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     4469    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
     4470
     4471    /*
     4472     * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
     4473     * normal workloads (for some definition of "normal").
     4474     */
     4475    uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
     4476    switch (pSvmTransient->u64ExitCode)
     4477    {
     4478        case SVM_EXIT_NPF:
     4479            return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
     4480
     4481        case SVM_EXIT_IOIO:
     4482            return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
     4483
     4484        case SVM_EXIT_RDTSC:
     4485            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
     4486
     4487        case SVM_EXIT_RDTSCP:
     4488            return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
     4489
     4490        case SVM_EXIT_CPUID:
     4491            return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
     4492
     4493        case SVM_EXIT_EXCEPTION_14:  /* X86_XCPT_PF */
     4494            return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
     4495
     4496        case SVM_EXIT_EXCEPTION_7:   /* X86_XCPT_NM */
     4497            return hmR0SvmExitXcptNM(pVCpu, pCtx, pSvmTransient);
     4498
     4499        case SVM_EXIT_EXCEPTION_6:   /* X86_XCPT_UD */
     4500            return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
     4501
     4502        case SVM_EXIT_EXCEPTION_16:  /* X86_XCPT_MF */
     4503            return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
     4504
     4505        case SVM_EXIT_EXCEPTION_1:   /* X86_XCPT_DB */
     4506            return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
     4507
     4508        case SVM_EXIT_EXCEPTION_17:  /* X86_XCPT_AC */
     4509            return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
     4510
     4511        case SVM_EXIT_EXCEPTION_3:   /* X86_XCPT_BP */
     4512            return hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient);
     4513
     4514        case SVM_EXIT_MONITOR:
     4515            return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
     4516
     4517        case SVM_EXIT_MWAIT:
     4518            return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
     4519
     4520        case SVM_EXIT_HLT:
     4521            return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
     4522
     4523        case SVM_EXIT_READ_CR0:
     4524        case SVM_EXIT_READ_CR3:
     4525        case SVM_EXIT_READ_CR4:
     4526            return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
     4527
     4528        case SVM_EXIT_WRITE_CR0:
     4529        case SVM_EXIT_WRITE_CR3:
     4530        case SVM_EXIT_WRITE_CR4:
     4531        case SVM_EXIT_WRITE_CR8:
     4532            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
     4533
     4534        case SVM_EXIT_PAUSE:
     4535            return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
     4536
     4537        case SVM_EXIT_VMMCALL:
     4538            return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
     4539
     4540        case SVM_EXIT_VINTR:
     4541            return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
     4542
     4543        case SVM_EXIT_INTR:
     4544        case SVM_EXIT_FERR_FREEZE:
     4545        case SVM_EXIT_NMI:
     4546            return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
     4547
     4548        case SVM_EXIT_MSR:
     4549            return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
     4550
     4551        case SVM_EXIT_INVLPG:
     4552            return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
     4553
     4554        case SVM_EXIT_WBINVD:
     4555            return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
     4556
     4557        case SVM_EXIT_INVD:
     4558            return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
     4559
     4560        case SVM_EXIT_RDPMC:
     4561            return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
     4562
     4563        default:
     4564        {
     4565            switch (pSvmTransient->u64ExitCode)
     4566            {
     4567                case SVM_EXIT_READ_DR0:     case SVM_EXIT_READ_DR1:     case SVM_EXIT_READ_DR2:     case SVM_EXIT_READ_DR3:
     4568                case SVM_EXIT_READ_DR6:     case SVM_EXIT_READ_DR7:     case SVM_EXIT_READ_DR8:     case SVM_EXIT_READ_DR9:
     4569                case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
     4570                case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
     4571                    return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
     4572
     4573                case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
     4574                case SVM_EXIT_WRITE_DR6:    case SVM_EXIT_WRITE_DR7:    case SVM_EXIT_WRITE_DR8:    case SVM_EXIT_WRITE_DR9:
     4575                case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
     4576                case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
     4577                    return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
     4578
     4579                case SVM_EXIT_XSETBV:
     4580                    return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
     4581
     4582                case SVM_EXIT_TASK_SWITCH:
     4583                    return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
     4584
     4585                case SVM_EXIT_IRET:
     4586                    return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
     4587
     4588                case SVM_EXIT_SHUTDOWN:
     4589                    return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
     4590
     4591                case SVM_EXIT_SMI:
     4592                case SVM_EXIT_INIT:
     4593                {
     4594                    /*
      4595                     * We don't intercept SMIs. As for INIT signals, it really shouldn't ever happen here. If it ever does,
     4596                     * we want to know about it so log the exit code and bail.
     4597                     */
     4598                    AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit %#RX32\n", (uint32_t)pSvmTransient->u64ExitCode));
     4599                    pVCpu->hm.s.u32HMError = (uint32_t)pSvmTransient->u64ExitCode;
     4600                    return VERR_SVM_UNEXPECTED_EXIT;
     4601                }
     4602
     4603#ifdef VBOX_WITH_NESTED_HWVIRT
     4604                case SVM_EXIT_CLGI:     return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
     4605                case SVM_EXIT_STGI:     return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
     4606                case SVM_EXIT_VMLOAD:   return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
     4607                case SVM_EXIT_VMSAVE:   return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
     4608                case SVM_EXIT_INVLPGA:  return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
     4609                case SVM_EXIT_VMRUN:    return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
     4610#else
     4611                case SVM_EXIT_CLGI:
     4612                case SVM_EXIT_STGI:
     4613                case SVM_EXIT_VMLOAD:
     4614                case SVM_EXIT_VMSAVE:
     4615                case SVM_EXIT_INVLPGA:
     4616                case SVM_EXIT_VMRUN:
     4617#endif
     4618                case SVM_EXIT_RSM:
     4619                case SVM_EXIT_SKINIT:
     4620                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
     4621
     4622#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
     4623                case SVM_EXIT_EXCEPTION_0:             /* X86_XCPT_DE */
     4624                /*   SVM_EXIT_EXCEPTION_1: */          /* X86_XCPT_DB - Handled above. */
     4625                case SVM_EXIT_EXCEPTION_2:             /* X86_XCPT_NMI */
     4626                /*   SVM_EXIT_EXCEPTION_3: */          /* X86_XCPT_BP - Handled above. */
     4627                case SVM_EXIT_EXCEPTION_4:             /* X86_XCPT_OF */
     4628                case SVM_EXIT_EXCEPTION_5:             /* X86_XCPT_BR */
     4629                /*   SVM_EXIT_EXCEPTION_6: */          /* X86_XCPT_UD - Handled above. */
     4630                /*   SVM_EXIT_EXCEPTION_7: */          /* X86_XCPT_NM - Handled above. */
     4631                case SVM_EXIT_EXCEPTION_8:             /* X86_XCPT_DF */
     4632                case SVM_EXIT_EXCEPTION_9:             /* X86_XCPT_CO_SEG_OVERRUN */
     4633                case SVM_EXIT_EXCEPTION_10:            /* X86_XCPT_TS */
     4634                case SVM_EXIT_EXCEPTION_11:            /* X86_XCPT_NP */
     4635                case SVM_EXIT_EXCEPTION_12:            /* X86_XCPT_SS */
     4636                case SVM_EXIT_EXCEPTION_13:            /* X86_XCPT_GP */
     4637                /*   SVM_EXIT_EXCEPTION_14: */         /* X86_XCPT_PF - Handled above. */
     4638                case SVM_EXIT_EXCEPTION_15:            /* Reserved. */
     4639                /*   SVM_EXIT_EXCEPTION_16: */         /* X86_XCPT_MF - Handled above. */
     4640                /*   SVM_EXIT_EXCEPTION_17: */         /* X86_XCPT_AC - Handled above. */
     4641                case SVM_EXIT_EXCEPTION_18:            /* X86_XCPT_MC */
     4642                case SVM_EXIT_EXCEPTION_19:            /* X86_XCPT_XF */
     4643                case SVM_EXIT_EXCEPTION_20: case SVM_EXIT_EXCEPTION_21: case SVM_EXIT_EXCEPTION_22:
     4644                case SVM_EXIT_EXCEPTION_23: case SVM_EXIT_EXCEPTION_24: case SVM_EXIT_EXCEPTION_25:
     4645                case SVM_EXIT_EXCEPTION_26: case SVM_EXIT_EXCEPTION_27: case SVM_EXIT_EXCEPTION_28:
     4646                case SVM_EXIT_EXCEPTION_29: case SVM_EXIT_EXCEPTION_30: case SVM_EXIT_EXCEPTION_31:
     4647                {
     4648                    /** @todo r=ramshankar; We should be doing
     4649                     *        HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY here! */
     4650
     4651                    PSVMVMCB pVmcb   = pVCpu->hm.s.svm.pVmcb;
    36894652                    SVMEVENT Event;
    36904653                    Event.u          = 0;
     
    40615024{
    40625025    int rc = VINF_SUCCESS;
    4063     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     5026    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    40645027
    40655028    Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
     
    43585321    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    43595322    {
    4360         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     5323        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    43615324        Assert(pVmcb->ctrl.u64NextRIP);
    43625325        AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */
     
    43695332}
    43705333
    4371 /* Currently only used by nested hw.virt instructions, so ifdef'd as such, otherwise compilers start whining. */
     5334
    43725335#ifdef VBOX_WITH_NESTED_HWVIRT
    43735336/**
     
    43845347    if (pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    43855348    {
    4386         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     5349        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    43875350        uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
    43885351        Assert(cbInstr == cbLikely);
     
    43935356#endif
    43945357
     5358
    43955359/**
    43965360 * Advances the guest RIP by the number of bytes specified in @a cb. This does
     
    44075371}
    44085372#undef HMSVM_UPDATE_INTR_SHADOW
     5373
     5374
     5375#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
     5376/**
     5377 * Merges the guest MSR permission bitmap into the nested-guest MSR permission
     5378 * bitmap.
     5379 *
     5380 * @param   pVCpu               The cross context virtual CPU structure.
     5381 * @param   pvMsrBitmap         Pointer to the guest MSRPM bitmap.
     5382 * @param   pvNstGstMsrBitmap   Pointer to the nested-guest MSRPM bitmap.
     5383 */
     5384static void hmR0SvmMergeMsrpmBitmap(PVMCPU pVCpu, const void *pvMsrBitmap, void *pvNstGstMsrBitmap)
     5385{
     5386    RT_NOREF(pVCpu);
      5387    uint64_t const *puChunk       = (uint64_t const *)pvMsrBitmap;
     5388    uint64_t       *puNstGstChunk = (uint64_t *)pvNstGstMsrBitmap;
     5389    uint32_t const cbChunks       = SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT;
     5390    uint32_t const cChunks        = cbChunks / sizeof(*puChunk);
     5391    Assert(cbChunks % sizeof(*puChunk) == 0);
     5392
     5393    for (uint32_t idxChunk = 0, offChunk = 0;
     5394          idxChunk < cChunks;
     5395          idxChunk++, offChunk += sizeof(*puChunk))
     5396    {
     5397        /* Leave reserved offsets (1800h+) untouched (as all bits set, see SVMR0InitVM). */
     5398        if (offChunk >= 0x1800)
     5399            break;
     5400        puNstGstChunk[idxChunk] |= puChunk[idxChunk];
     5401    }
     5402}
     5403
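/*
 * Because a set MSRPM bit means "intercept", the OR-merge above produces a bitmap that
 * causes a #VMEXIT whenever either the outer VMM or the nested hypervisor wants the
 * access intercepted. Tiny illustration with made-up values; the helper is a sketch,
 * not VirtualBox code.
 */
static void svmSketchMsrpmMerge(void)
{
    uint8_t const bGuest  = 0x02;             /* outer VMM intercepts only the write of some MSR */
    uint8_t const bNstGst = 0x01;             /* nested hypervisor intercepts only the read */
    uint8_t const bMerged = bGuest | bNstGst;
    Assert(bMerged == 0x03);                  /* the merged map intercepts both accesses */
    RT_NOREF(bMerged);
}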
     5404
     5405/**
     5406 * Performs a \#VMEXIT that happens during VMRUN emulation in hmR0SvmExecVmrun.
     5407 *
     5408 * @returns VBox status code.
     5409 * @param   pVCpu           The cross context virtual CPU structure.
     5410 * @param   pCtx            Pointer to the guest-CPU context.
     5411 */
     5412static int hmR0SvmExecVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
     5413{
     5414    /*
      5415     * Disable the global interrupt flag so that no interrupts or NMIs are
      5416     * delivered to the guest.
     5417     */
     5418    pCtx->hwvirt.svm.fGif = 0;
     5419
     5420    /*
     5421     * Restore the guest's "host" state.
     5422     */
     5423    CPUMSvmVmExitRestoreHostState(pCtx);
     5424
     5425    /*
     5426     * Restore the guest's force-flags.
     5427     */
     5428    if (pCtx->hwvirt.fLocalForcedActions)
     5429    {
     5430        VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
     5431        pCtx->hwvirt.fLocalForcedActions = 0;
     5432    }
     5433
     5434    /*
     5435     * Restore the modifications we did to the nested-guest VMCB in order
     5436     * to execute the nested-guest in SVM R0.
     5437     */
     5438    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     5439    HMSvmNstGstVmExitNotify(pVCpu, pVmcbNstGst);
     5440
     5441    /*
     5442     * Write the nested-guest VMCB back to nested-guest memory.
     5443     */
     5444    RTGCPHYS const GCPhysVmcb = pCtx->hwvirt.svm.GCPhysVmcb;
     5445    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb, pVmcbNstGst, sizeof(*pVmcbNstGst));
     5446
     5447    /*
      5448     * Clear the copy of the nested-guest VMCB controls held in the guest-CPU context.
     5449     */
     5450    PSVMVMCBCTRL pVmcbCtrl = &pVmcbNstGst->ctrl;
     5451    memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl));
     5452    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
     5453
     5454    if (RT_SUCCESS(rc))
     5455        return VINF_SVM_VMEXIT;
     5456
     5457    Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp\n", GCPhysVmcb));
     5458    return rc;
     5459}
     5460
     5461
     5462/**
     5463 * Caches the nested-guest VMCB fields before we modify them for executing the
     5464 * nested-guest under SVM R0.
     5465 *
     5466 * @param   pCtx            Pointer to the guest-CPU context.
     5467 */
     5468static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
     5469{
     5470    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     5471    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     5472    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     5473
     5474    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
     5475    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
     5476    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
      5477    pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
     5478    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
     5479    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
     5480    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
     5481    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
     5482    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
     5483    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
     5484    pNstGstVmcbCache->fValid            = true;
     5485}
     5486
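/*
 * Sketch of the intent here: the fields cached above are the ones hmR0SvmExecVmrun
 * overwrites or merges for hardware execution (IOPM/MSRPM physical addresses,
 * intercepts, clean bits, V_INTR_MASKING), presumably so that HMSvmNstGstVmExitNotify
 * (see hmR0SvmExecVmexit above) can restore the nested hypervisor's original values
 * before the VMCB is written back to nested-guest memory.
 */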
     5487
     5488/**
     5489 * Setup execution of the nested-guest in SVM R0.
     5490 *
     5491 * @returns VBox status code.
     5492 * @param   pVCpu           The cross context virtual CPU structure.
     5493 * @param   pCtx            Pointer to the guest-CPU context.
     5494 * @param   GCPhysVmcb      The nested-guest physical address of its VMCB.
     5495 * @param   cbInstr         Length of the VMRUN instruction in bytes.
     5496 */
     5497static int hmR0SvmExecVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb, uint8_t cbInstr)
     5498{
     5499    Assert(CPUMGetGuestCPL(pVCpu) == 0);
     5500    Assert(!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0);
     5501
     5502    /*
     5503     * Cache the physical address of the VMCB for #VMEXIT exceptions.
     5504     */
     5505    pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;
     5506
     5507    /*
     5508     * Save the "host" (guest-state) so that when we do a #VMEXIT we can restore the guest-state.
     5509     *
     5510     * The real host-state shall be saved/restored by the physical CPU once it executes VMRUN
     5511     * with the nested-guest VMCB.
     5512     */
     5513    CPUMSvmVmRunSaveHostState(pCtx, cbInstr);
     5514
     5515    /*
     5516     * Read the nested-guest VMCB state.
     5517     */
     5518    PVM pVM = pVCpu->CTX_SUFF(pVM);
     5519    int rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));
     5520    if (RT_SUCCESS(rc))
     5521    {
     5522        PSVMVMCB          pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     5523        PSVMVMCBCTRL      pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     5524        PSVMVMCBSTATESAVE pVmcbNstGstState = &pVmcbNstGst->guest;
     5525
     5526        /*
     5527         * Validate nested-guest state and controls.
     5528         * The rest shall be done by the physical CPU.
     5529         */
     5530        /* VMRUN must always be intercepted. */
     5531        if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))
     5532        {
     5533            Log(("hmR0SvmExecVmrun: VMRUN instruction not intercepted -> #VMEXIT\n"));
     5534            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5535            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5536        }
     5537
     5538        /* Nested paging. */
     5539        if (    pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging
     5540            && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)
     5541        {
     5542            Log(("hmR0SvmExecVmrun: Nested paging not supported -> #VMEXIT\n"));
     5543            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5544            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5545        }
     5546        /** @todo When implementing nested-paging for the nested-guest don't forget to
     5547         *        adjust/check PAT MSR. */
     5548
     5549        /* AVIC. */
     5550        if (    pVmcbNstGstCtrl->IntCtrl.n.u1AvicEnable
     5551            && !pVM->cpum.ro.GuestFeatures.fSvmAvic)
     5552        {
     5553            Log(("hmR0SvmExecVmrun: AVIC not supported -> #VMEXIT\n"));
     5554            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5555            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5556        }
     5557
     5558        /* Last branch record (LBR) virtualization. */
     5559        if (    (pVmcbNstGstCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)
     5560            && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)
     5561        {
     5562            Log(("hmR0SvmExecVmrun: LBR virtualization not supported -> #VMEXIT\n"));
     5563            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5564            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5565        }
     5566
     5567        /*
     5568         * MSR permission bitmap (MSRPM).
     5569         */
     5570        RTGCPHYS const GCPhysMsrBitmap = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
     5571        Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
     5572        rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,
     5573                                     SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);
     5574        if (RT_FAILURE(rc))
     5575        {
     5576            Log(("hmR0SvmExecVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
     5577            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5578            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5579        }
     5580
     5581        /*
      5582         * Host-physical address of the nested-guest MSR permission bitmap (MSRPM).
     5583         */
     5584        RTHCPHYS HCPhysNstGstMsrpm;
     5585        rc = PGMPhysGCPhys2HCPhys(pVM, pVmcbNstGstCtrl->u64MSRPMPhysAddr, &HCPhysNstGstMsrpm);
     5586        if (RT_FAILURE(rc))
     5587        {
      5588            Log(("hmR0SvmExecVmrun: Failed to obtain the host-physical address of the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
     5589            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5590            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5591        }
     5592
     5593        /*
     5594         * EFER MSR.
     5595         */
     5596        uint64_t uValidEfer;
     5597        rc = CPUMQueryValidatedGuestEfer(pVM, pVmcbNstGstState->u64CR0, pVmcbNstGstState->u64EFER, pVmcbNstGstState->u64EFER,
     5598                                         &uValidEfer);
     5599        if (RT_FAILURE(rc))
     5600        {
      5601            Log(("hmR0SvmExecVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGstState->u64EFER));
     5602            pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
     5603            return hmR0SvmExecVmexit(pVCpu, pCtx);
     5604        }
     5605        bool const fLongModeEnabled         = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
     5606        bool const fPaging                  = RT_BOOL(pVmcbNstGstState->u64CR0 & X86_CR0_PG);
     5607        bool const fLongModeWithPaging      = fLongModeEnabled && fPaging;
     5608        /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0) and update it. */
     5609        if (fLongModeWithPaging)
     5610            uValidEfer |= MSR_K6_EFER_LMA;
     5611
     5612        /*
     5613         * Cache the nested-guest VMCB fields before we start modifying them below.
     5614         */
     5615        hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
     5616
     5617        /*
      5618         * The IOPM of the nested-guest can be ignored because the guest always
      5619         * intercepts all IO port accesses. Thus, we'll swap in the guest IOPM rather
      5620         * than the nested-guest one and swap it back on the #VMEXIT.
     5621         */
     5622        pVmcbNstGstCtrl->u64IOPMPhysAddr  = g_HCPhysIOBitmap;
     5623
     5624        /*
      5625         * Use the host-physical address of the nested-guest MSRPM in the VMCB rather
      5626         * than its guest-physical address.
     5627         */
     5628        pVmcbNstGstCtrl->u64MSRPMPhysAddr = HCPhysNstGstMsrpm;
     5629
     5630        /*
      5631         * Merge the guest MSR permission bitmap into the nested-guest one.
      5632         *
      5633         * Note: the assumption here is that our MSRPM is set up only once in SVMR0SetupVM.
      5634         * In hmR0SvmPreRunGuestCommittedNested we update the nested-guest MSRPM directly,
      5635         * hence the merge can be done once here during VMRUN.
     5636         */
     5637        hmR0SvmMergeMsrpmBitmap(pVCpu, pVCpu->hm.s.svm.pvMsrBitmap, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
     5638
     5639        /*
      5640         * Merge the guest exception intercepts into the nested-guest ones.
     5641         */
     5642        {
     5643            PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     5644            hmR0SvmMergeIntercepts(pVCpu, pVmcb, pVmcbNstGst);
     5645        }
     5646
     5647        /*
     5648         * Check for pending virtual interrupts.
     5649         */
     5650        if (pVmcbNstGstCtrl->IntCtrl.n.u1VIrqPending)
     5651            VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     5652        else
     5653            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
     5654
     5655        /*
     5656         * Preserve the required force-flags.
     5657         *
     5658         * We only preserve the force-flags that would affect the execution of the
     5659         * nested-guest (or the guest).
     5660         *
      5661         *   - VMCPU_FF_INHIBIT_INTERRUPTS need -not- be preserved as it applies only to
      5662         *     the single instruction following it, which here is this VMRUN instruction itself.
     5663         *
     5664         *   - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the
     5665         *     execution of a subsequent IRET instruction in the guest.
     5666         *
     5667         *   - The remaining FFs (e.g. timers) can stay in place so that we will be
     5668         *     able to generate interrupts that should cause #VMEXITs for the
     5669         *     nested-guest.
     5670         */
     5671        pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
     5672
     5673        /*
     5674         * Interrupt shadow.
     5675         */
     5676        if (pVmcbNstGstCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
     5677        {
     5678            LogFlow(("hmR0SvmExecVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGstState->u64RIP));
     5679            /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
     5680            EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGstState->u64RIP);
     5681        }
     5682
     5683        /*
     5684         * Load the guest-CPU state.
     5685         * Skip CPL adjustments (will be done by the hardware).
     5686         */
     5687        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, ES, es);
     5688        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, CS, cs);
     5689        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, SS, ss);
     5690        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbNstGstState, DS, ds);
     5691        pCtx->gdtr.cbGdt   = pVmcbNstGstState->GDTR.u32Limit;
     5692        pCtx->gdtr.pGdt    = pVmcbNstGstState->GDTR.u64Base;
     5693        pCtx->idtr.cbIdt   = pVmcbNstGstState->IDTR.u32Limit;
     5694        pCtx->idtr.pIdt    = pVmcbNstGstState->IDTR.u64Base;
     5695        pCtx->cr0          = pVmcbNstGstState->u64CR0;
     5696        pCtx->cr4          = pVmcbNstGstState->u64CR4;
     5697        pCtx->cr3          = pVmcbNstGstState->u64CR3;
     5698        pCtx->cr2          = pVmcbNstGstState->u64CR2;
     5699        pCtx->dr[6]        = pVmcbNstGstState->u64DR6;
     5700        pCtx->dr[7]        = pVmcbNstGstState->u64DR7;
     5701        pCtx->rflags.u64   = pVmcbNstGstState->u64RFlags;
     5702        pCtx->rax          = pVmcbNstGstState->u64RAX;
     5703        pCtx->rsp          = pVmcbNstGstState->u64RSP;
     5704        pCtx->rip          = pVmcbNstGstState->u64RIP;
     5705        pCtx->msrEFER      = uValidEfer;
     5706
      5707        /* Mask DR6 and DR7 with the mandatory set/clear bits. */
     5708        pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
     5709        pCtx->dr[6] |= X86_DR6_RA1_MASK;
     5710        pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
     5711        pCtx->dr[7] |= X86_DR7_RA1_MASK;
     5712
     5713        /*
     5714         * VMRUN loads a subset of the guest-CPU state (see above) and nothing else. Ensure
     5715         * hmR0SvmLoadGuestStateNested doesn't need to load anything back to the VMCB cache
     5716         * as we go straight into executing the nested-guest.
     5717         *
     5718         * If we fall back to ring-3 we would have to re-load things from the guest-CPU
     5719         * state into the VMCB as we are unsure what state we're in (e.g., VMRUN ends up
     5720         * getting executed in IEM along with a handful of nested-guest instructions and
     5721         * we have to continue executing the nested-guest in R0 since IEM doesn't know
     5722         * about this VMCB cache which is in HM).
     5723         */
     5724        PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     5725        pNstGstVmcbCache->fVmrunEmulatedInR0 = true;
     5726        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_ALL_GUEST);
     5727        HMCPU_CF_SET(pVCpu,   HM_CHANGED_HOST_GUEST_SHARED_STATE);
     5728
     5729        /*
      5730         * Set the global interrupt flag (GIF) to allow interrupts and NMIs in the nested-guest.
     5731         */
     5732        pCtx->hwvirt.svm.fGif = 1;
     5733
     5734        /*
     5735         * Inform PGM about paging mode changes.
     5736         * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet.
     5737         */
     5738        /** @todo What about informing PGM about CR0.WP? */
     5739        PGMFlushTLB(pVCpu, pCtx->cr3, true /* fGlobal */);
     5740
      5741        rc = PGMChangeMode(pVCpu, pVmcbNstGstState->u64CR0 | X86_CR0_PE, pVmcbNstGstState->u64CR4, pCtx->msrEFER);
     5742        return rc;
     5743    }
     5744
     5745    return rc;
     5746}
     5747#endif /* VBOX_WITH_NESTED_HWVIRT && !VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM */
    44095748
    44105749
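
The hunk above merges the outer guest's MSR permission bitmap into the nested-guest one via hmR0SvmMergeMsrpmBitmap, whose body is not part of this changeset. A minimal sketch of what such a merge could look like, assuming an intercept bit set in either bitmap must remain set in the merged result and that both bitmaps span SVM_MSRPM_PAGES 4K pages; the function and parameter names here are illustrative, not the actual helper:

    /* Illustrative sketch only -- not the actual hmR0SvmMergeMsrpmBitmap. ORs the outer
       guest's MSRPM into the nested-guest's so that an MSR access intercepted at either
       level still causes a #VMEXIT while the nested-guest runs. */
    static void svmMergeMsrpmSketch(uint64_t *pau64NstGstMsrpm, uint64_t const *pau64GstMsrpm)
    {
        size_t const cQWords = (SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE) / sizeof(uint64_t);
        for (size_t i = 0; i < cQWords; i++)
            pau64NstGstMsrpm[i] |= pau64GstMsrpm[i];
    }
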
     
    45775916    {
    45785917        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
    4579         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     5918        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    45805919        uint8_t const cbInstr   = pVmcb->ctrl.u64NextRIP - pCtx->rip;
    45815920        RTGCPTR const GCPtrPage = pVmcb->ctrl.u64ExitInfo1;
     
    46886027    {
    46896028        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
    4690         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6029        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    46916030        bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
    46926031        if (fMovCRx)
     
    47286067    {
    47296068        Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE);
    4730         PCSVMVMCB pVmcb = (PCSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6069        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    47316070        bool const fMovCRx = RT_BOOL(pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_MASK);
    47326071        if (fMovCRx)
     
    48016140{
    48026141    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    4803     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6142    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    48046143    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
    48056144
     
    49286267
    49296268        /* Don't intercept DRx read and writes. */
    4930         PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6269        PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    49316270        pVmcb->ctrl.u16InterceptRdDRx = 0;
    49326271        pVmcb->ctrl.u16InterceptWrDRx = 0;
     
    50156354    Log4(("hmR0SvmExitIOInstr: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
    50166355
    5017     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    50186356    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
     6357    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    50196358
    50206359    /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
     
    52326571
    52336572    /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
    5234     PSVMVMCB pVmcb           = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6573    PSVMVMCB pVmcb           = pVCpu->hm.s.svm.pVmcb;
    52356574    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;
    52366575    RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
     
    53366675    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    53376676
    5338     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6677    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    53396678    pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;  /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
    53406679    pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;
     
    54356774
    54366775    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
    5437     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6776    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    54386777    hmR0SvmClearIretIntercept(pVmcb);
    54396778
     
    54546793
    54556794    /* See AMD spec. 15.12.15 "#PF (Page Fault)". */
    5456     PSVMVMCB    pVmcb         = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     6795    PSVMVMCB    pVmcb         = pVCpu->hm.s.svm.pVmcb;
    54576796    uint32_t    u32ErrCode    = pVmcb->ctrl.u64ExitInfo1;
    54586797    RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2;
     
    55726911
    55736912    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    5574     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; NOREF(pVmcb);
    5575     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);
     6913    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     6914    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
    55766915
    55776916    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
     
    56266965
    56276966    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    5628     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; NOREF(pVmcb);
    5629     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);
     6967    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     6968    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);  NOREF(pVmcb);
    56306969
    56316970    int rc = VERR_SVM_UNEXPECTED_XCPT_EXIT;
     
    56707009
    56717010    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
    5672     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; NOREF(pVmcb);
    5673     Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid);
     7011    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     7012    Assert(!pVmcb->ctrl.ExitIntInfo.n.u1Valid); NOREF(pVmcb);
    56747013
    56757014    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
     
    57187057    /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
    57197058       DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
    5720     PSVMVMCB    pVmcb   = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    5721     PVM         pVM     = pVCpu->CTX_SUFF(pVM);
     7059    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
     7060    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    57227061    int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6, pVCpu->hm.s.fSingleInstruction);
    57237062    if (rc == VINF_EM_RAW_GUEST_TRAP)
     
    58787217    /** @todo Stat. */
    58797218    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmrun); */
     7219    VBOXSTRICTRC rcStrict;
    58807220    uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
    5881     VBOXSTRICTRC rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
     7221#if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
     7222    rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
     7223#else
     7224    rcStrict = hmR0SvmExecVmrun(pVCpu, pCtx, pCtx->rax, cbInstr);
     7225    if (rcStrict == VINF_SUCCESS)
     7226        rcStrict = VINF_SVM_VMRUN;
     7227#endif
    58827228    return VBOXSTRICTRC_VAL(rcStrict);
    58837229}
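
The MSR permission bitmap handling in this changeset (reading the nested-guest MSRPM from guest memory, translating its address to a host-physical one, and merging in the guest MSRPM) follows the MSRPM layout defined in the AMD APM: each MSR gets two consecutive bits (read intercept, then write intercept), with MSRs 0x0-0x1FFF covered by the first 2K bytes, 0xC0000000-0xC0001FFF by the next 2K, and 0xC0010000-0xC0011FFF by the 2K after that. A small, self-contained sketch of the offset calculation; this is a hypothetical helper for illustration, not code from the changeset:

    #include <stdint.h>

    /* Hypothetical helper: returns the bit offset of the read-intercept bit for an MSR
       in an SVM MSR permission bitmap (the write-intercept bit is the next bit up), or
       UINT32_MAX if the MSR is not covered by the bitmap. Layout per AMD APM vol. 2. */
    static uint32_t svmMsrpmReadBitOffsetSketch(uint32_t uMsr)
    {
        if (uMsr <= UINT32_C(0x00001fff))
            return uMsr * 2;                                           /* bytes 0x000-0x7ff  */
        if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
            return 0x800 * 8 + (uMsr - UINT32_C(0xc0000000)) * 2;      /* bytes 0x800-0xfff  */
        if (uMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
            return 0x1000 * 8 + (uMsr - UINT32_C(0xc0010000)) * 2;     /* bytes 0x1000-0x17ff */
        return UINT32_MAX;
    }
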