VirtualBox

Ignore:
Timestamp:
Mar 28, 2018 6:32:43 AM (7 years ago)
Author:
vboxsync
Message:

VMM/HM: Fixes to MSRPM bit accesses. Implemented merging of guest and nested-guest MSRPMs. Other nits and cleanups.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r71504 r71529  
    326326*   Internal Functions                                                                                                           *
    327327*********************************************************************************************************************************/
    328 static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
     328static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
    329329                                    SVMMSREXITWRITE enmWrite);
    330330static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
     
    393393*********************************************************************************************************************************/
    394394/** Ring-0 memory object for the IO bitmap. */
    395 RTR0MEMOBJ                  g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
     395static RTR0MEMOBJ           g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    396396/** Physical address of the IO bitmap. */
    397 RTHCPHYS                    g_HCPhysIOBitmap  = 0;
     397static RTHCPHYS             g_HCPhysIOBitmap;
    398398/** Pointer to the IO bitmap. */
    399 R0PTRTYPE(void *)           g_pvIOBitmap      = NULL;
    400 
    401 #ifdef VBOX_WITH_NESTED_HWVIRT
    402 /** Ring-0 memory object for the nested-guest MSRPM bitmap. */
    403 RTR0MEMOBJ                  g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
    404 /** Physical address of the nested-guest MSRPM bitmap. */
    405 RTHCPHYS                    g_HCPhysNstGstMsrBitmap  = 0;
    406 /** Pointer to the nested-guest MSRPM bitmap. */
    407 R0PTRTYPE(void *)           g_pvNstGstMsrBitmap      = NULL;
    408 #endif
    409 
     399static R0PTRTYPE(void *)        g_pvIOBitmap;
    410400
    411401#ifdef VBOX_STRICT
    412 # define HMSVM_LOG_CS          RT_BIT_32(0)
    413 # define HMSVM_LOG_SS          RT_BIT_32(1)
    414 # define HMSVM_LOG_FS          RT_BIT_32(2)
    415 # define HMSVM_LOG_GS          RT_BIT_32(3)
    416 # define HMSVM_LOG_LBR         RT_BIT_32(4)
    417 # define HMSVM_LOG_ALL         (  HMSVM_LOG_CS \
    418                                 | HMSVM_LOG_SS \
    419                                 | HMSVM_LOG_FS \
    420                                 | HMSVM_LOG_GS \
    421                                 | HMSVM_LOG_LBR)
    422 
    423 /**
    424  * Dumps CPU state and additional info. to the logger for diagnostics.
     402# define HMSVM_LOG_CS           RT_BIT_32(0)
     403# define HMSVM_LOG_SS           RT_BIT_32(1)
     404# define HMSVM_LOG_FS           RT_BIT_32(2)
     405# define HMSVM_LOG_GS           RT_BIT_32(3)
     406# define HMSVM_LOG_LBR          RT_BIT_32(4)
     407# define HMSVM_LOG_ALL          (  HMSVM_LOG_CS \
     408                                 | HMSVM_LOG_SS \
     409                                 | HMSVM_LOG_FS \
     410                                 | HMSVM_LOG_GS \
     411                                 | HMSVM_LOG_LBR)
     412
     413/**
     414 * Dumps virtual CPU state and additional info. to the logger for diagnostics.
    425415 *
    426416 * @param   pVCpu       The cross context virtual CPU structure.
     
    468458    NOREF(pVmcbGuest);
    469459}
    470 #endif
     460#endif  /* VBOX_STRICT */
    471461
    472462
     
    585575{
    586576    /*
    587      * Allocate 12 KB for the IO bitmap. Since this is non-optional and we always intercept all IO accesses, it's done
    588      * once globally here instead of per-VM.
     577     * Allocate 12 KB (3 pages) for the IO bitmap. Since this is non-optional and we always
     578     * intercept all IO accesses, it's done once globally here instead of per-VM.
    589579     */
    590580    Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
     
    599589    ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
    600590
    601 #ifdef VBOX_WITH_NESTED_HWVIRT
    602     /*
    603      * Allocate 8 KB for the MSR permission bitmap for the nested-guest.
    604      */
    605     Assert(g_hMemObjNstGstMsrBitmap == NIL_RTR0MEMOBJ);
    606     rc = RTR0MemObjAllocCont(&g_hMemObjNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
    607     if (RT_FAILURE(rc))
    608         return rc;
    609 
    610     g_pvNstGstMsrBitmap     = RTR0MemObjAddress(g_hMemObjNstGstMsrBitmap);
    611     g_HCPhysNstGstMsrBitmap = RTR0MemObjGetPagePhysAddr(g_hMemObjNstGstMsrBitmap, 0 /* iPage */);
    612 
    613     /* Set all bits to intercept all MSR accesses. */
    614     ASMMemFill32(g_pvNstGstMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
    615 #endif
    616 
    617591    return VINF_SUCCESS;
    618592}
     
    631605        g_hMemObjIOBitmap = NIL_RTR0MEMOBJ;
    632606    }
    633 
    634 #ifdef VBOX_WITH_NESTED_HWVIRT
    635     if (g_hMemObjNstGstMsrBitmap != NIL_RTR0MEMOBJ)
    636     {
    637         RTR0MemObjFree(g_hMemObjNstGstMsrBitmap, true /* fFreeMappings */);
    638         g_pvNstGstMsrBitmap      = NULL;
    639         g_HCPhysNstGstMsrBitmap  = 0;
    640         g_hMemObjNstGstMsrBitmap = NIL_RTR0MEMOBJ;
    641     }
    642 #endif
    643607}
    644608
     
    850814
    851815/**
    852  * Sets the permission bits for the specified MSR in the MSRPM.
    853  *
    854  * @param   pVmcb           Pointer to the VM control block.
     816 * Sets the permission bits for the specified MSR in the MSRPM bitmap.
     817 *
     818 * @param   pCtx            Pointer to the guest-CPU or nested-guest-CPU context.
    855819 * @param   pbMsrBitmap     Pointer to the MSR bitmap.
    856  * @param   uMsr            The MSR for which the access permissions are being set.
     820 * @param   idMsr           The MSR for which the permissions are being set.
    857821 * @param   enmRead         MSR read permissions.
    858822 * @param   enmWrite        MSR write permissions.
    859  */
    860 static void hmR0SvmSetMsrPermission(PSVMVMCB pVmcb, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
     823 *
     824 * @remarks This function does -not- clear the VMCB clean bits for MSRPM. The
     825 *          caller needs to take care of this.
     826 */
     827static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
    861828                                    SVMMSREXITWRITE enmWrite)
    862829{
    863     uint16_t offMsrpm;
    864     uint32_t uMsrpmBit;
    865     int rc = HMSvmGetMsrpmOffsetAndBit(uMsr, &offMsrpm, &uMsrpmBit);
     830    bool const  fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
     831    uint16_t    offMsrpm;
     832    uint8_t     uMsrpmBit;
     833    int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    866834    AssertRC(rc);
    867835
    868     Assert(uMsrpmBit < 0x3fff);
     836    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
    869837    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
    870838
     
    873841        ASMBitSet(pbMsrBitmap, uMsrpmBit);
    874842    else
    875         ASMBitClear(pbMsrBitmap, uMsrpmBit);
     843    {
     844        if (!fInNestedGuestMode)
     845            ASMBitClear(pbMsrBitmap, uMsrpmBit);
     846#ifdef VBOX_WITH_NESTED_HWVIRT
     847        else
     848        {
     849            /* Only clear the bit if the nested-guest is also not intercepting the MSR read. */
     850            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     851            pbNstGstMsrBitmap += offMsrpm;
     852            if (!ASMBitTest(pbNstGstMsrBitmap, uMsrpmBit))
     853                ASMBitClear(pbMsrBitmap, uMsrpmBit);
     854            else
     855                Assert(ASMBitTest(pbMsrBitmap, uMsrpmBit));
     856        }
     857#endif
     858    }
    876859
    877860    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
    878861        ASMBitSet(pbMsrBitmap, uMsrpmBit + 1);
    879862    else
    880         ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
    881 
    882     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     863    {
     864        if (!fInNestedGuestMode)
     865            ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
     866#ifdef VBOX_WITH_NESTED_HWVIRT
     867        else
     868        {
     869            /* Only clear the bit if the nested-guest is also not intercepting the MSR write. */
     870            uint8_t const *pbNstGstMsrBitmap = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     871            pbNstGstMsrBitmap += offMsrpm;
     872            if (!ASMBitTest(pbNstGstMsrBitmap, uMsrpmBit + 1))
     873                ASMBitClear(pbMsrBitmap, uMsrpmBit + 1);
     874            else
     875                Assert(ASMBitTest(pbMsrBitmap, uMsrpmBit + 1));
     876        }
     877#endif
     878    }
    883879}
    884880
     
    10491045         */
    10501046        uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    1051         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1052         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1053         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1054         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1055         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1056         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1057         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1058         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1059         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    1060         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1047        PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
     1048        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1049        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1050        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1051        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1052        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1053        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1054        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1055        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1056        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1057        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1058        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    10611059    }
    10621060
     
    19691967            /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
    19701968            if (fPendingIntr)
    1971                 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
     1969                hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
    19721970            else
    19731971            {
    1974                 hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     1972                hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    19751973                pVCpu->hm.s.svm.fSyncVTpr = true;
    19761974            }
     1975            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    19771976        }
    19781977        else
     
    23582357
    23592358#ifdef VBOX_WITH_NESTED_HWVIRT
     2359/**
     2360 * Merges the guest and nested-guest MSR permission bitmap.
     2361 *
     2362 * If the guest is intercepting an MSR we need to intercept it regardless of
     2363 * whether the nested-guest is intercepting it or not.
     2364 *
     2365 * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
     2366 * @param   pVCpu       The cross context virtual CPU structure.
     2367 * @param   pCtx        Pointer to the nested-guest-CPU context.
     2368 */
     2369static void hmR0SvmMergeMsrpm(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx)
     2370{
     2371    uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
     2372    uint64_t const *pu64NstGstMsrpm = (uint64_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     2373    uint64_t       *pu64DstMsrpm    = (uint64_t *)pHostCpu->n.svm.pvNstGstMsrpm;
     2374
     2375    /* MSRPM bytes from offset 0x1800 are reserved, so we stop merging there. */
     2376    uint32_t const offRsvdQwords = 0x1800 >> 3;
     2377    for (uint32_t i = 0; i < offRsvdQwords; i++)
     2378        pu64DstMsrpm[i] = pu64NstGstMsrpm[i] | pu64GstMsrpm[i];
     2379}
     2380
     2381
    23602382/**
    23612383 * Caches the nested-guest VMCB fields before we modify them for execution using
     
    24312453         * The IOPM of the nested-guest can be ignored because the guest always
    24322454         * intercepts all IO port accesses. Thus, we'll swap to the guest IOPM rather
    2433          * into the nested-guest one and swap it back on the #VMEXIT.
     2455         * than the nested-guest IOPM and swap the field back on the #VMEXIT.
    24342456         */
    24352457        pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
    2436 
    2437         /*
    2438          * Load the host-physical address into the MSRPM rather than the nested-guest
    2439          * physical address (currently we trap all MSRs in the nested-guest).
    2440          */
    2441         pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap;
    24422458
    24432459        /*
    24442460         * Use the same nested-paging as the "outer" guest. We can't dynamically
    24452461         * switch off nested-paging suddenly while executing a VM (see assertion at the
    2446          * end of Trap0eHandler in PGMAllBth.h).
     2462         * end of Trap0eHandler() in PGMAllBth.h).
    24472463         */
    24482464        pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
     
    24572473    {
    24582474        Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap);
    2459         Assert(pVmcbNstGstCtrl->u64MSRPMPhysAddr = g_HCPhysNstGstMsrBitmap);
    24602475        Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPaging.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    24612476    }
     
    25322547    return rc;
    25332548}
    2534 #endif
     2549#endif /* VBOX_WITH_NESTED_HWVIRT */
    25352550
    25362551
     
    42904305    hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcbNstGst);
    42914306
     4307    /* Pre-load the guest FPU state. */
    42924308    if (!CPUMIsGuestFPUStateActive(pVCpu))
    42934309    {
     
    43064322    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
    43074323
     4324    PHMGLOBALCPUINFO pHostCpu  = hmR0GetCurrentCpu();
     4325    RTCPUID const idCurrentCpu = pHostCpu->idCpu;
     4326    bool const    fMigratedCpu = idCurrentCpu != pVCpu->hm.s.idLastCpu;
     4327
    43084328    /* Setup TSC offsetting. */
    4309     RTCPUID idCurrentCpu = hmR0GetCurrentCpu()->idCpu;
    43104329    if (   pSvmTransient->fUpdateTscOffsetting
    4311         || idCurrentCpu != pVCpu->hm.s.idLastCpu)   /** @todo is this correct for nested-guests where
    4312                                                               nested-VCPU<->physical-CPU mapping doesn't exist. */
     4330        || fMigratedCpu)
    43134331    {
    43144332        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
     
    43174335
    43184336    /* If we've migrating CPUs, mark the VMCB Clean bits as dirty. */
    4319     if (idCurrentCpu != pVCpu->hm.s.idLastCpu)
     4337    if (fMigratedCpu)
    43204338        pVmcbNstGst->ctrl.u32VmcbCleanBits = 0;
    43214339
     
    43344352    }
    43354353    pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
     4354
     4355    /* Merge the guest and nested-guest MSRPM. */
     4356    hmR0SvmMergeMsrpm(pHostCpu, pVCpu, pCtx);
     4357
     4358    /* Update the nested-guest VMCB to use the newly merged MSRPM. */
     4359    pVmcbNstGst->ctrl.u64MSRPMPhysAddr = pHostCpu->n.svm.HCPhysNstGstMsrpm;
    43364360
    43374361    /* The TLB flushing would've already been setup by the nested-hypervisor. */
     
    43554379        && !(pVmcbNstGst->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    43564380    {
    4357         hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     4381        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     4382        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     4383
    43584384        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    43594385        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
     
    43644390    else
    43654391    {
    4366         hmR0SvmSetMsrPermission(pVmcbNstGst, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     4392        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     4393        pVmcbNstGst->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    43674394        pSvmTransient->fRestoreTscAuxMsr = false;
    43684395    }
     
    44694496        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    44704497    {
    4471         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     4498        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     4499        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
     4500
    44724501        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    44734502        uint64_t u64GuestTscAux = CPUMR0GetGuestTscAux(pVCpu);
     
    44784507    else
    44794508    {
    4480         hmR0SvmSetMsrPermission(pVmcb, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     4509        hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_TSC_AUX, SVMMSREXIT_INTERCEPT_READ, SVMMSREXIT_INTERCEPT_WRITE);
     4510        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    44814511        pSvmTransient->fRestoreTscAuxMsr = false;
    44824512    }
     
    50315061    const uint16_t    u16Port       = pIoExitInfo->n.u16Port;
    50325062    const SVMIOIOTYPE enmIoType     = (SVMIOIOTYPE)pIoExitInfo->n.u1Type;
    5033     const uint8_t     cbReg         = (pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7;
     5063    const uint8_t     cbReg         = (pIoExitInfo->u  >> SVM_IOIO_OP_SIZE_SHIFT)  & 7;
    50345064    const uint8_t     cAddrSizeBits = ((pIoExitInfo->u >> SVM_IOIO_ADDR_SIZE_SHIFT) & 7) << 4;
    50355065    const uint8_t     iEffSeg       = pIoExitInfo->n.u3SEG;
     
    51225152                uint32_t const idMsr = pCtx->ecx;
    51235153                uint16_t offMsrpm;
    5124                 uint32_t uMsrpmBit;
     5154                uint8_t uMsrpmBit;
    51255155                int rc = HMSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);
    51265156                if (RT_SUCCESS(rc))
    51275157                {
    5128                     void const *pvMsrBitmap    = pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
    5129                     bool const fInterceptRead  = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit);
    5130                     bool const fInterceptWrite = ASMBitTest(pvMsrBitmap, (offMsrpm << 3) + uMsrpmBit + 1);
     5158                    Assert(uMsrpmBit == 0 || uMsrpmBit == 2 || uMsrpmBit == 4 || uMsrpmBit == 6);
     5159                    Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);
     5160
     5161                    uint8_t const *pbMsrBitmap = (uint8_t const *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
     5162                    pbMsrBitmap               += offMsrpm;
     5163                    bool const fInterceptRead  = ASMBitTest(pbMsrBitmap, uMsrpmBit);
     5164                    bool const fInterceptWrite = ASMBitTest(pbMsrBitmap, uMsrpmBit + 1);
    51315165
    51325166                    if (   (fInterceptWrite && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette