VirtualBox

Changeset 68293 in vbox for trunk


Ignore:
Timestamp:
Aug 4, 2017 6:05:13 AM (7 years ago)
Author:
vboxsync
Message:

VMM: Nested Hw.virt: SVM bits.

Location:
trunk
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/hm.h

    r68226 r68293  
    157157VMM_INT_DECL(TRPMEVENT)         HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
    158158VMM_INT_DECL(int)               HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit);
    159 /** @} */
    160 
    161 /** @name Nested hardware virtualization.
    162  * @{
    163  */
    164 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
    165 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb);
    166 VMM_INT_DECL(uint8_t)           HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx);
    167 VMM_INT_DECL(bool)              HMSvmNstGstCanTakePhysInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
    168 VMM_INT_DECL(bool)              HMSvmNstGstCanTakeVirtInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
    169 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstHandleCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode,
    170                                                                uint64_t uExitInfo1, uint64_t uExitInfo2);
    171 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstHandleMsrIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t idMsr, bool fWrite);
    172 VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstHandleIOIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMIOIOEXITINFO pIoExitInfo,
    173                                                              uint64_t uNextRip);
    174159VMM_INT_DECL(bool)              HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
    175160                                                         uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
    176161                                                         PSVMIOIOEXITINFO pIoExitInfo);
     162VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
     163/** @} */
     164
     165/** @name Nested hardware virtualization.
     166 * @{
     167 */
     168#ifdef VBOX_WITH_NESTED_HWVIRT
    177169VMM_INT_DECL(void)              HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst);
     170#endif
    178171/** @} */
    179172
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r68275 r68293  
    330330
    331331
     332#ifdef VBOX_WITH_NESTED_HWVIRT
    332333/**
    333334 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
     
    364365    pNstGstVmcbCache->fVmrunEmulatedInR0 = false;
    365366}
    366 
     367#endif
     368
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r68279 r68293  
    295295/** Physical address of the IO bitmap. */
    296296RTHCPHYS                    g_HCPhysIOBitmap  = 0;
    297 /** Virtual address of the IO bitmap. */
     297/** Pointer to the IO bitmap. */
    298298R0PTRTYPE(void *)           g_pvIOBitmap      = NULL;
    299299
     300#ifdef VBOX_WITH_NESTED_HWVIRT
     301/** Ring-0 memory object for the nested-guest MSRPM bitmap. */
     302RTR0MEMOBJ                  g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
     303/** Physical address of the nested-guest MSRPM bitmap. */
     304RTHCPHYS                    g_HCPhysNstGstMsrBitmap  = 0;
     305/** Pointer to the nested-guest MSRPM bitmap. */
     306R0PTRTYPE(void *)           g_pvNstGstMsrBitmap      = NULL;
     307#endif
    300308
    301309/**
     
    426434    /* Set all bits to intercept all IO accesses. */
    427435    ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
     436
     437#ifdef VBOX_WITH_NESTED_HWVIRT
     438    /*
     439     * Allocate 8 KB for the MSR permission bitmap for the nested-guest.
     440     */
     441    Assert(g_hMemObjNstGstMsrBitmap == NIL_RTR0MEMOBJ);
     442    rc = RTR0MemObjAllocCont(&g_hMemObjNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
     443    if (RT_FAILURE(rc))
     444        return rc;
     445
     446    g_pvNstGstMsrBitmap     = RTR0MemObjAddress(g_hMemObjNstGstMsrBitmap);
     447    g_HCPhysNstGstMsrBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjNstGstMsrBitmap, 0 /* iPage */);
     448
     449    /* Set all bits to intercept all MSR accesses. */
     450    ASMMemFill32(g_pvNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
     451#endif
     452
    428453    return VINF_SUCCESS;
    429454}
     
    442467        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    443468    }
     469
     470#ifdef VBOX_WITH_NESTED_HWVIRT
     471    if (g_hMemObjNstGstMsrBitmap != NIL_RTR0MEMOBJ)
     472    {
     473        RTR0MemObjFree(g_hMemObjNstGstMsrBitmap, true /* fFreeMappings */);
     474        g_pvNstGstMsrBitmap      = NULL;
     475        g_HCPhysNstGstMsrBitmap  = 0;
     476        g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
     477    }
     478#endif
    444479}
    445480
     
    19211956                          | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
    19221957                          | HM_CHANGED_GUEST_LAZY_MSRS            /* Unused. */
     1958                          | HM_CHANGED_SVM_NESTED_GUEST
    19231959                          | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
    19241960                          | HM_CHANGED_SVM_RESERVED2
    1925                           | HM_CHANGED_SVM_RESERVED3
    1926                           | HM_CHANGED_SVM_RESERVED4);
     1961                          | HM_CHANGED_SVM_RESERVED3);
    19271962
    19281963    /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     
    19391974#ifdef VBOX_WITH_NESTED_HWVIRT
    19401975/**
     1976 * Caches the nested-guest VMCB fields before we modify them for executing the
     1977 * nested-guest under SVM R0.
     1978 *
     1979 * @param   pCtx            Pointer to the guest-CPU context.
     1980 *
     1981 * @sa      HMSvmNstGstVmExitNotify.
     1982 */
     1983static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
     1984{
     1985    PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     1986    PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
     1987    PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     1988
     1989    pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
     1990    pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
     1991    pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
     1992    pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
     1993    pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
     1994    pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
     1995    pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
     1996    pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
     1997    pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
     1998    pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
     1999    pNstGstVmcbCache->fValid            = true;
     2000}
     2001
     2002
     2003/**
     2004 * Sets up the nested-guest for hardware-assisted SVM execution.
     2005 *
     2006 * @param   pVCpu           The cross context virtual CPU structure.
     2007 * @param   pCtx            Pointer to the guest-CPU context.
     2008 */
     2009static void hmR0SvmLoadGuestVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2010{
     2011    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_NESTED_GUEST))
     2012    {
     2013        /*
     2014         * Cache the nested-guest VMCB fields before we start modifying them below.
     2015         */
     2016        hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
     2017
     2018        /*
     2019         * The IOPM of the nested-guest can be ignored because the guest always
     2020         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
     2021         * into the nested-guest one and swap it back on the #VMEXIT.
     2022         */
     2023        PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     2024        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     2025        pVmcbNstGstCtrl->u64IOPMPhysAddr  = g_HCPhysIOBitmap;
     2026
     2027        /*
     2028         * Load the host-physical address into the MSRPM rather than the nested-guest
     2029         * physical address.
     2030         */
     2031        pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
     2032
     2033        /*
     2034         * Merge the guest exception intercepts in to the nested-guest ones.
     2035         */
     2036        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
     2037        hmR0SvmMergeIntercepts(pVCpu, pVmcb, pVmcbNstGst);
     2038
     2039        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_NESTED_GUEST);
     2040    }
     2041}
     2042
     2043
     2044/**
    19412045 * Loads the nested-guest state into the VMCB.
    19422046 *
     
    19592063    if (!pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0)
    19602064    {
     2065        hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
    19612066        hmR0SvmLoadGuestControlRegsNested(pVCpu, pVmcbNstGst, pCtx);
    19622067        hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     
    19852090                          | HM_CHANGED_SVM_RESERVED1              /* Reserved. */
    19862091                          | HM_CHANGED_SVM_RESERVED2
    1987                           | HM_CHANGED_SVM_RESERVED3
    1988                           | HM_CHANGED_SVM_RESERVED4);
     2092                          | HM_CHANGED_SVM_RESERVED3);
    19892093
    19902094    /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     
    42714375                if (RT_SUCCESS(rc))
    42724376                {
    4273                     void const *pvMsrBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    4274                     bool const fInterceptRd = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
    4275                     bool const fInterceptWr = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
    4276 
    4277                     if (   (pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE && fInterceptWr)
    4278                         || (pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ  && fInterceptRd))
     4377                    void const *pvMsrBitmap    = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     4378                    bool const fInterceptRead = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
     4379                    bool const fInterceptWrite = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
     4380
     4381                    if (   (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
     4382                        || (fInterceptRead  && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
    42794383                    {
    42804384                        return hmR0SvmExecVmexit(pVCpu, pCtx);
     
    54435547#if defined(VBOX_WITH_NESTED_HWVIRT) && !defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
    54445548/**
    5445  * Merges the guest MSR permission bitmap into the nested-guest MSR permission
    5446  * bitmap.
    5447  *
    5448  * @param   pVCpu               The cross context virtual CPU structure.
    5449  * @param   pvMsrBitmap         Pointer to the guest MSRPM bitmap.
    5450  * @param   pvNstGstMsrBitmap   Pointer to the nested-guest MSRPM bitmap.
    5451  */
    5452 static void hmR0SvmMergeMsrpmBitmap(PVMCPU pVCpu, const void *pvMsrBitmap, void *pvNstGstMsrBitmap)
    5453 {
    5454     RT_NOREF(pVCpu);
    5455     uint64_t const *puChunk       = (uint64_t *)pvMsrBitmap;
    5456     uint64_t       *puNstGstChunk = (uint64_t *)pvNstGstMsrBitmap;
    5457     uint32_t const cbChunks       = SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT;
    5458     uint32_t const cChunks        = cbChunks / sizeof(*puChunk);
    5459     Assert(cbChunks % sizeof(*puChunk) == 0);
    5460 
    5461     for (uint32_t idxChunk = 0, offChunk = 0;
    5462           idxChunk < cChunks;
    5463           idxChunk++, offChunk += sizeof(*puChunk))
    5464     {
    5465         /* Leave reserved offsets (1800h+) untouched (as all bits set, see SVMR0InitVM). */
    5466         if (offChunk >= 0x1800)
    5467             break;
    5468         puNstGstChunk[idxChunk] |= puChunk[idxChunk];
    5469     }
    5470 }
    5471 
    5472 
    5473 /**
    54745549 * Performs an SVM world-switch (VMRUN, \#VMEXIT) updating PGM and HM internals.
    54755550 *
     
    54955570
    54965571/**
    5497  * Performs a \#VMEXIT that happens during VMRUN emulation in hmR0SvmExecVmrun.
     5572 * Performs a \#VMEXIT when the VMRUN was emulated using hmR0SvmExecVmrun and
     5573 * optionally then through SVM R0 execution.
    54985574 *
    54995575 * @returns VBox status code.
     
    55435619    Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    55445620
     5621    /*
     5622     * Make sure if VMRUN happens outside this SVM R0 code, we don't skip setting
     5623     * things up that are required for executing the nested-guest using hardware-assisted SVM.
     5624     */
     5625    pVCpu->hm.s.svm.NstGstVmcbCache.fVmrunEmulatedInR0 = false;
     5626
    55455627    if (RT_SUCCESS(rc))
    55465628    {
     
    55525634    Log(("hmR0SvmExecVmexit: Failed to write guest-VMCB at %#RGp\n", GCPhysVmcb));
    55535635    return rc;
    5554 }
    5555 
    5556 
    5557 /**
    5558  * Caches the nested-guest VMCB fields before we modify them for executing the
    5559  * nested-guest under SVM R0.
    5560  *
    5561  * @param   pCtx            Pointer to the guest-CPU context.
    5562  */
    5563 static void hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
    5564 {
    5565     PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    5566     PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    5567     PSVMNESTEDVMCBCACHE pNstGstVmcbCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    5568 
    5569     pNstGstVmcbCache->u16InterceptRdCRx = pVmcbNstGstCtrl->u16InterceptRdCRx;
    5570     pNstGstVmcbCache->u16InterceptWrCRx = pVmcbNstGstCtrl->u16InterceptWrCRx;
    5571     pNstGstVmcbCache->u16InterceptRdDRx = pVmcbNstGstCtrl->u16InterceptRdDRx;
    5572     pNstGstVmcbCache->u16InterceptWrDRx = pVmcbNstGstCtrl->u16InterceptWrDRx;
    5573     pNstGstVmcbCache->u32InterceptXcpt  = pVmcbNstGstCtrl->u32InterceptXcpt;
    5574     pNstGstVmcbCache->u64InterceptCtrl  = pVmcbNstGstCtrl->u64InterceptCtrl;
    5575     pNstGstVmcbCache->u64IOPMPhysAddr   = pVmcbNstGstCtrl->u64IOPMPhysAddr;
    5576     pNstGstVmcbCache->u64MSRPMPhysAddr  = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
    5577     pNstGstVmcbCache->u64VmcbCleanBits  = pVmcbNstGstCtrl->u64VmcbCleanBits;
    5578     pNstGstVmcbCache->fVIntrMasking     = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;
    5579     pNstGstVmcbCache->fValid            = true;
    55805636}
    55815637
     
    57065762
    57075763        /*
    5708          * Cache the nested-guest VMCB fields before we start modifying them below.
     5764         * Set up the nested-guest for executing it using hardware-assisted SVM.
    57095765         */
    5710         hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
    5711 
    5712         /*
    5713          * The IOPM of the nested-guest can be ignored because the guest always
    5714          * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
    5715          * into the nested-guest one and swap it back on the #VMEXIT.
    5716          */
    5717         pVmcbNstGstCtrl->u64IOPMPhysAddr  = g_HCPhysIOBitmap;
    5718 
    5719         /*
    5720          * Load the host-physical address into the MSRPM rather than the nested-guest
    5721          * physical address.
    5722          */
    5723         pVmcbNstGstCtrl->u64MSRPMPhysAddr = HCPhysNstGstMsrpm;
    5724 
    5725         /*
    5726          * Merge the guest MSR permission bitmap in to the nested-guest one.
    5727          *
    5728          * Note the assumption here is that our MSRPM is set up only once in SVMR0SetupVM
    5729          * In hmR0SvmPreRunGuestCommittedNested we directly update the nested-guest one.
    5730          * Hence it can be done once here during VMRUN.
    5731          */
    5732         hmR0SvmMergeMsrpmBitmap(pVCpu, pVCpu->hm.s.svm.pvMsrBitmap, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));
    5733 
    5734         /*
    5735          * Merge the guest exception intercepts in to the nested-guest ones.
    5736          */
    5737         {
    5738             PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    5739             hmR0SvmMergeIntercepts(pVCpu, pVmcb, pVmcbNstGst);
    5740         }
     5766        hmR0SvmLoadGuestVmcbNested(pVCpu, pCtx);
    57415767
    57425768        /*
  • trunk/src/VBox/VMM/include/HMInternal.h

    r68226 r68293  
    186186#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(21)
    187187#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(22)
    188 #define HM_CHANGED_SVM_RESERVED4                 RT_BIT(23)
     188#define HM_CHANGED_SVM_NESTED_GUEST              RT_BIT(23)
    189189
    190190#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette