VirtualBox

Changeset 43469 in vbox


Timestamp: Sep 28, 2012 3:40:14 PM (12 years ago)
Author:    vboxsync
Message:   VMM: HM bits.
Location:  trunk/src/VBox/VMM
Files:     3 edited

Legend: unchanged lines are shown unprefixed; removed lines are prefixed with -, added lines with +.
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

--- r43468
+++ r43469
         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
         {
-            rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMSRBitmap, PAGE_SIZE, false /* fExecutable */);
+            rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, PAGE_SIZE, false /* fExecutable */);
             AssertRC(rc);
             if (RT_FAILURE(rc))
                 return rc;
 
-            pVCpu->hm.s.vmx.pvMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjMSRBitmap);
-            pVCpu->hm.s.vmx.HCPhysMSRBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjMSRBitmap, 0);
-            memset(pVCpu->hm.s.vmx.pvMSRBitmap, 0xff, PAGE_SIZE);
+            pVCpu->hm.s.vmx.pvMsrBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjMsrBitmap);
+            pVCpu->hm.s.vmx.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjMsrBitmap, 0);
+            memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
         }
 
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
-        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjGuestMSR, PAGE_SIZE, false /* fExecutable */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjGuestMsr, PAGE_SIZE, false /* fExecutable */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;
 
-        pVCpu->hm.s.vmx.pvGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjGuestMSR);
-        pVCpu->hm.s.vmx.HCPhysGuestMSR = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjGuestMSR, 0);
-        Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMSR & 0xf));
-        memset(pVCpu->hm.s.vmx.pvGuestMSR, 0, PAGE_SIZE);
+        pVCpu->hm.s.vmx.pvGuestMsr     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjGuestMsr);
+        pVCpu->hm.s.vmx.HCPhysGuestMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjGuestMsr, 0);
+        Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));
+        memset(pVCpu->hm.s.vmx.pvGuestMsr, 0, PAGE_SIZE);
 
         /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
-        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjHostMSR, PAGE_SIZE, false /* fExecutable */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjHostMsr, PAGE_SIZE, false /* fExecutable */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;
 
-        pVCpu->hm.s.vmx.pvHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjHostMSR);
-        pVCpu->hm.s.vmx.HCPhysHostMSR = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjHostMSR, 0);
-        Assert(!(pVCpu->hm.s.vmx.HCPhysHostMSR & 0xf));
-        memset(pVCpu->hm.s.vmx.pvHostMSR, 0, PAGE_SIZE);
+        pVCpu->hm.s.vmx.pvHostMsr     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjHostMsr);
+        pVCpu->hm.s.vmx.HCPhysHostMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjHostMsr, 0);
+        Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));
+        memset(pVCpu->hm.s.vmx.pvHostMsr, 0, PAGE_SIZE);
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
     
             pVCpu->hm.s.vmx.HCPhysVAPIC  = 0;
         }
-        if (pVCpu->hm.s.vmx.hMemObjMSRBitmap != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjMSRBitmap, false);
-            pVCpu->hm.s.vmx.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
-            pVCpu->hm.s.vmx.pvMSRBitmap       = 0;
-            pVCpu->hm.s.vmx.HCPhysMSRBitmap   = 0;
+        if (pVCpu->hm.s.vmx.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjMsrBitmap, false);
+            pVCpu->hm.s.vmx.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
+            pVCpu->hm.s.vmx.pvMsrBitmap       = 0;
+            pVCpu->hm.s.vmx.HCPhysMsrBitmap   = 0;
         }
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-        if (pVCpu->hm.s.vmx.hMemObjHostMSR != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjHostMSR, false);
-            pVCpu->hm.s.vmx.hMemObjHostMSR = NIL_RTR0MEMOBJ;
-            pVCpu->hm.s.vmx.pvHostMSR       = 0;
-            pVCpu->hm.s.vmx.HCPhysHostMSR   = 0;
-        }
-        if (pVCpu->hm.s.vmx.hMemObjGuestMSR != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjGuestMSR, false);
-            pVCpu->hm.s.vmx.hMemObjGuestMSR = NIL_RTR0MEMOBJ;
-            pVCpu->hm.s.vmx.pvGuestMSR       = 0;
-            pVCpu->hm.s.vmx.HCPhysGuestMSR   = 0;
+        if (pVCpu->hm.s.vmx.hMemObjHostMsr != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjHostMsr, false);
+            pVCpu->hm.s.vmx.hMemObjHostMsr = NIL_RTR0MEMOBJ;
+            pVCpu->hm.s.vmx.pvHostMsr       = 0;
+            pVCpu->hm.s.vmx.HCPhysHostMsr   = 0;
+        }
+        if (pVCpu->hm.s.vmx.hMemObjGuestMsr != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjGuestMsr, false);
+            pVCpu->hm.s.vmx.hMemObjGuestMsr = NIL_RTR0MEMOBJ;
+            pVCpu->hm.s.vmx.pvGuestMsr       = 0;
+            pVCpu->hm.s.vmx.HCPhysGuestMsr   = 0;
         }
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
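The two hunks above are a straight identifier rename; the underlying allocation and teardown follow the usual IPRT ring-0 pattern: allocate a physically contiguous, non-executable page, resolve its ring-0 virtual address and host-physical address, and on teardown free the memory object and clear the cached pointers. A minimal stand-alone sketch of that pattern, assuming only the RTR0MemObj* API used above (the MSRPAGE type and helper names are illustrative, not VirtualBox code):

    #include <iprt/memobj.h>
    #include <iprt/param.h>     /* PAGE_SIZE */
    #include <iprt/string.h>    /* memset */
    #include <iprt/err.h>

    /* Illustrative container, not a VirtualBox structure. */
    typedef struct MSRPAGE
    {
        RTR0MEMOBJ  hMemObj;
        void       *pv;
        RTHCPHYS    HCPhys;
    } MSRPAGE;

    static int msrPageAlloc(MSRPAGE *pPage)
    {
        /* One physically contiguous, non-executable page, as in the code above. */
        int rc = RTR0MemObjAllocCont(&pPage->hMemObj, PAGE_SIZE, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;
        pPage->pv     = RTR0MemObjAddress(pPage->hMemObj);
        pPage->HCPhys = RTR0MemObjGetPagePhysAddr(pPage->hMemObj, 0 /* iPage */);
        memset(pPage->pv, 0, PAGE_SIZE);   /* the real code fills the MSR bitmap with 0xff instead */
        return VINF_SUCCESS;
    }

    static void msrPageFree(MSRPAGE *pPage)
    {
        if (pPage->hMemObj != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(pPage->hMemObj, false /* fFreeMappings */);
            pPage->hMemObj = NIL_RTR0MEMOBJ;
            pPage->pv      = NULL;
            pPage->HCPhys  = 0;
        }
    }

In the real code the MSR bitmap page is initialised to 0xff, i.e. every MSR access intercepts until individual bits are cleared by the permission helper shown further down, while the guest and host MSR load/store areas are zeroed.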
     
         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
         {
-            Assert(pVCpu->hm.s.vmx.HCPhysMSRBitmap);
+            Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
             val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
         }
     
         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
         {
-            Assert(pVCpu->hm.s.vmx.HCPhysMSRBitmap);
-
-            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMSRBitmap);
+            Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
+
+            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
             AssertRC(rc);
 
     
          * Set the guest & host MSR load/store physical addresses.
          */
-        Assert(pVCpu->hm.s.vmx.HCPhysGuestMSR);
-        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMSR);
+        Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
         AssertRC(rc);
-        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMSR);
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
         AssertRC(rc);
-        Assert(pVCpu->hm.s.vmx.HCPhysHostMSR);
-        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMSR);
+        Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMsr);
         AssertRC(rc);
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
 {
     unsigned ulBit;
-    uint8_t *pvMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMSRBitmap;
+    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
 
     /*
     
         /* AMD Sixth Generation x86 Processor MSRs */
         ulBit = (ulMSR - 0xC0000000);
-        pvMSRBitmap += 0x400;
+        pvMsrBitmap += 0x400;
     }
     else
     
     Assert(ulBit <= 0x1fff);
     if (fRead)
-        ASMBitClear(pvMSRBitmap, ulBit);
+        ASMBitClear(pvMsrBitmap, ulBit);
     else
-        ASMBitSet(pvMSRBitmap, ulBit);
+        ASMBitSet(pvMsrBitmap, ulBit);
 
     if (fWrite)
-        ASMBitClear(pvMSRBitmap + 0x800, ulBit);
+        ASMBitClear(pvMsrBitmap + 0x800, ulBit);
     else
-        ASMBitSet(pvMSRBitmap + 0x800, ulBit);
+        ASMBitSet(pvMsrBitmap + 0x800, ulBit);
 }
 
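The 0x400 and 0x800 offsets used above come from the VMX MSR-bitmap layout in the Intel SDM: the 4 KB page is split into four 1 KB bitmaps, read-low (MSRs 0x00000000 to 0x00001FFF) at offset 0x000, read-high (0xC0000000 to 0xC0001FFF) at 0x400, write-low at 0x800 and write-high at 0xC00, and a clear bit means the corresponding RDMSR/WRMSR does not cause a VM-exit. A self-contained sketch of the same index computation, assuming only the IPRT bit helpers used above (the function name is illustrative):

    #include <iprt/types.h>
    #include <iprt/asm.h>   /* ASMBitSet / ASMBitClear */

    /*
     * Bitmap layout restated from the code above:
     *   0x000  read bitmap,  MSRs 0x00000000-0x00001FFF
     *   0x400  read bitmap,  MSRs 0xC0000000-0xC0001FFF
     *   0x800  write bitmap, MSRs 0x00000000-0x00001FFF
     *   0xC00  write bitmap, MSRs 0xC0000000-0xC0001FFF
     * Set bit => the access causes a VM-exit; clear bit => the access is allowed.
     */
    static void msrBitmapSetPermission(uint8_t *pvBitmap, uint32_t idMsr, bool fAllowRead, bool fAllowWrite)
    {
        uint32_t ulBit;
        if (idMsr <= 0x00001fff)
            ulBit = idMsr;
        else if (idMsr >= 0xc0000000 && idMsr <= 0xc0001fff)
        {
            ulBit     = idMsr - 0xc0000000;
            pvBitmap += 0x400;              /* switch to the high-MSR read bitmap */
        }
        else
            return;                         /* MSRs outside both ranges always intercept */

        if (fAllowRead)
            ASMBitClear(pvBitmap, ulBit);
        else
            ASMBitSet(pvBitmap, ulBit);

        if (fAllowWrite)
            ASMBitClear(pvBitmap + 0x800, ulBit);   /* the matching write bitmap */
        else
            ASMBitSet(pvBitmap + 0x800, ulBit);
    }

Because the bitmap page was filled with 0xff at allocation time, only the MSRs explicitly passed through this helper are exempted from interception.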
     
          * the world switch back to the host.
          */
-        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMSR;
+        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
         unsigned idxMsr = 0;
 
     
      * during VM-entry and restored into the VM-exit store area during VM-exit.
      */
-    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMSR;
+    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     unsigned idxMsr = 0;
 
     
     }
 
-    pVCpu->hm.s.vmx.cCachedMSRs = idxMsr;
+    pVCpu->hm.s.vmx.cCachedMsrs = idxMsr;
 
     rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
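Each element of the guest MSR area filled here is a 16-byte VMX auto load/store entry as defined by the Intel SDM: a 32-bit MSR index, 32 reserved bits that must be zero, and the 64-bit MSR value; the area itself must be 16-byte aligned, which is what the Assert on the physical addresses in the allocation hunk checks, and the final entry count is both cached in cCachedMsrs and written to VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT. A sketch of that layout, where the VMXAUTOMSR struct and helper are illustrative stand-ins for the VMXMSR type used above:

    #include <iprt/types.h>
    #include <iprt/assert.h>

    /* Entry layout per the Intel SDM; the real VBox type here is VMXMSR. */
    typedef struct VMXAUTOMSR
    {
        uint32_t u32Msr;        /* MSR index, e.g. 0xC0000081 (STAR). */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Loaded on VM-entry / stored on VM-exit. */
    } VMXAUTOMSR;
    AssertCompileSize(VMXAUTOMSR, 16);

    /* Append one MSR to the area and return the new entry count, which is what
       eventually lands in VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT. */
    static unsigned vmxAddAutoMsr(VMXAUTOMSR *paMsrs, unsigned idxMsr, uint32_t idMsr, uint64_t uValue)
    {
        paMsrs[idxMsr].u32Msr      = idMsr;
        paMsrs[idxMsr].u32Reserved = 0;
        paMsrs[idxMsr].u64Value    = uValue;
        return idxMsr + 1;
    }

Because the same physical page is programmed as both the VM-entry MSR-load and the VM-exit MSR-store address, values the guest changes are written back into these entries on VM-exit, which is what the loop further down reads when saving the possibly changed MSRs.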
     
      * Save the possibly changed MSRs that we automatically restore and save during a world switch.
      */
-    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++)
-    {
-        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMSR;
+    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMsrs; i++)
+    {
+        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
         pMsr += i;
 
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

--- r43468
+++ r43469
             for (VMCPUID i = 0; i < pVM->cCpus; i++)
             {
-                LogRel(("HM: VCPU%d: MSR bitmap physaddr    = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMSRBitmap));
+                LogRel(("HM: VCPU%d: MSR bitmap physaddr    = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
                 LogRel(("HM: VCPU%d: VMCS physaddr          = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
             }
     
                 if (pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
                 {
-                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMSRBitmap));
+                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys  %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMSR));
-                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys   %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMSR));
-                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs   %x\n",   i, pVM->aCpus[i].hm.s.vmx.cCachedMSRs));
+                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys  %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr));
+                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys   %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr));
+                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs   %x\n",   i, pVM->aCpus[i].hm.s.vmx.cCachedMsrs));
 #endif
                 }
  • trunk/src/VBox/VMM/include/HMInternal.h

--- r43468
+++ r43469
         R0PTRTYPE(uint8_t *)        pbApicAccess;
 
-        /** R0 memory object for the MSR entry load page (guest MSRs). */
-        RTR0MEMOBJ                  hMemObjMSREntryLoad;
-        /** Physical address of the MSR entry load page (guest MSRs). */
-        RTHCPHYS                    HCPhysMSREntryLoad;
-        /** Virtual address of the MSR entry load page (guest MSRs). */
-        R0PTRTYPE(void *)           pvMSREntryLoad;
-
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
         RTR0MEMOBJ                  hMemObjScratch;
     
         R0PTRTYPE(uint8_t *)        pbScratch;
 #endif
-        /** R0 memory object for the MSR exit store page (guest MSRs). */
-        RTR0MEMOBJ                  hMemObjMSRExitStore;
-        /** Physical address of the MSR exit store page (guest MSRs). */
-        RTHCPHYS                    HCPhysMSRExitStore;
-        /** Virtual address of the MSR exit store page (guest MSRs). */
-        R0PTRTYPE(void *)           pvMSRExitStore;
-
-        /** R0 memory object for the MSR exit load page (host MSRs). */
-        RTR0MEMOBJ                  hMemObjMSRExitLoad;
-        /** Physical address of the MSR exit load page (host MSRs). */
-        RTHCPHYS                    HCPhysMSRExitLoad;
-        /** Virtual address of the MSR exit load page (host MSRs). */
-        R0PTRTYPE(void *)           pvMSRExitLoad;
-
         /** Ring 0 handlers for VT-x. */
         DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));
     
         RTHCPHYS                    GCPhysEPTP;
 
-        /** Physical address of the MSR bitmap (1 page). */
-        RTHCPHYS                    HCPhysMSRBitmap;
-        /** R0 memory object for the MSR bitmap (1 page). */
-        RTR0MEMOBJ                  hMemObjMSRBitmap;
-        /** Virtual address of the MSR bitmap (1 page). */
-        R0PTRTYPE(void *)           pvMSRBitmap;
+        /** Physical address of the MSR bitmap. */
+        RTHCPHYS                    HCPhysMsrBitmap;
+        /** R0 memory object for the MSR bitmap. */
+        RTR0MEMOBJ                  hMemObjMsrBitmap;
+        /** Virtual address of the MSR bitmap. */
+        R0PTRTYPE(void *)           pvMsrBitmap;
 
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-        /** Physical address of the guest MSR load area (1 page). */
-        RTHCPHYS                    HCPhysGuestMSR;
-        /** R0 memory object for the guest MSR load area (1 page). */
-        RTR0MEMOBJ                  hMemObjGuestMSR;
-        /** Virtual address of the guest MSR load area (1 page). */
-        R0PTRTYPE(void *)           pvGuestMSR;
-
-        /** Physical address of the MSR load area (1 page). */
-        RTHCPHYS                    HCPhysHostMSR;
-        /** R0 memory object for the MSR load area (1 page). */
-        RTR0MEMOBJ                  hMemObjHostMSR;
-        /** Virtual address of the MSR load area (1 page). */
-        R0PTRTYPE(void *)           pvHostMSR;
+        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
+         *  for guest MSRs). */
+        RTHCPHYS                    HCPhysGuestMsr;
+        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
+         *  (used for guest MSRs). */
+        RTR0MEMOBJ                  hMemObjGuestMsr;
+        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
+         *  for guest MSRs). */
+        R0PTRTYPE(void *)           pvGuestMsr;
+
+        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
+        RTHCPHYS                    HCPhysHostMsr;
+        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
+        RTR0MEMOBJ                  hMemObjHostMsr;
+        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
+        R0PTRTYPE(void *)           pvHostMsr;
 
         /* Number of automatically loaded/restored guest MSRs during the world switch. */
-        uint32_t                    cCachedMSRs;
+        uint32_t                    cCachedMsrs;
         uint32_t                    uAlignment;
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */