VirtualBox

Changeset 22040 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 6, 2009 4:33:21 PM
Author: vboxsync
Message: VT-x: use MSR bitmaps and automatic load/store (risky change).

Location: trunk/src/VBox/VMM
Files: 5 edited

Legend: in the hunks below, lines shown with both the old and the new line number are unchanged context, lines carrying only the old (left) line number were removed in r22040, and lines carrying only the new (right, slightly indented) line number were added.
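
The changeset combines two VT-x features. The MSR bitmap is a 4 KB page referenced from the VMCS that lets selected guest RDMSR/WRMSR instructions run without causing a VM-exit, and the automatic MSR load/store areas let the CPU itself swap the guest and host values of LSTAR, STAR, SF_MASK, KERNEL_GS_BASE and EFER on VM-entry and VM-exit, replacing the hand-written wrmsr/rdmsr sequences in the switcher assembly. As a rough illustration of the bitmap side (offsets per the Intel SDM; the helper below is a sketch written for this page, not code from the changeset):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: would a guest RDMSR of idMsr still cause a VM-exit with the
     * given 4 KB MSR bitmap?  Layout: 0x000 read bits for MSRs
     * 0x00000000..0x00001fff, 0x400 read bits for 0xc0000000..0xc0001fff,
     * 0x800/0xc00 the corresponding write bits.  A set bit forces an exit;
     * vmxR0SetMSRPermission() in HWVMXR0.cpp below clears these bits. */
    static bool vmxSketchReadCausesExit(const uint8_t *pbMsrBitmap, uint32_t idMsr)
    {
        uint32_t offBase;
        if (idMsr <= UINT32_C(0x00001fff))
            offBase = 0x000;
        else if (idMsr >= UINT32_C(0xc0000000) && idMsr <= UINT32_C(0xc0001fff))
        {
            offBase = 0x400;
            idMsr  -= UINT32_C(0xc0000000);
        }
        else
            return true; /* MSRs outside both ranges always exit. */
        return (pbMsrBitmap[offBase + (idMsr >> 3)] >> (idMsr & 7)) & 1;
    }
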
  • trunk/src/VBox/VMM/HWACCM.cpp

    r22015 r22040  
    946946
    947947            LogRel(("HWACCM: TPR shadow physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
    948             LogRel(("HWACCM: MSR bitmap physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pMSRBitmapPhys));
     948
     949            /* Paranoia */
     950            AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
    949951
    950952            for (unsigned i=0;i<pVM->cCPUs;i++)
    951                 LogRel(("HWACCM: VMCS physaddr VCPU%d           = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
     953            {
     954                LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr      = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
     955                LogRel(("HWACCM: VCPU%d: VMCS physaddr            = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
     956            }
    952957
    953958#ifdef HWACCM_VTX_WITH_EPT
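
The AssertRelease added above is a capability check: bits 27:25 of IA32_VMX_MISC (MSR 0x485) hold a value N, and 512 * (N + 1) is the recommended maximum number of entries in each automatic MSR load/store area, so a conforming CPU always reports at least 512. A minimal sketch of what MSR_IA32_VMX_MISC_MAX_MSR presumably evaluates to (the real macro is defined in the VMX headers elsewhere in the tree):

    #include <stdint.h>

    /* Sketch: decode the recommended maximum MSR-area entry count from the
     * IA32_VMX_MISC value, bits 27:25, as 512 * (N + 1). */
    static inline uint32_t vmxSketchMiscMaxMsr(uint64_t uVmxMisc)
    {
        return 512 * (uint32_t)(((uVmxMisc >> 25) & UINT64_C(7)) + 1);
    }
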
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r22016 r22040  
    336336        R0PTRTYPE(uint8_t *)        pAPIC;
    337337
    338         /** R0 memory object for the MSR bitmap (1 page). */
    339         RTR0MEMOBJ                  pMemObjMSRBitmap;
    340         /** Physical address of the MSR bitmap (1 page). */
    341         RTHCPHYS                    pMSRBitmapPhys;
    342         /** Virtual address of the MSR bitmap (1 page). */
    343         R0PTRTYPE(uint8_t *)        pMSRBitmap;
    344 
    345338        /** R0 memory object for the MSR entry load page (guest MSRs). */
    346339        RTR0MEMOBJ                  pMemObjMSREntryLoad;
     
    585578        /** Current EPTP. */
    586579        RTHCPHYS                    GCPhysEPTP;
     580
     581        /** R0 memory object for the MSR bitmap (1 page). */
     582        RTR0MEMOBJ                  pMemObjMSRBitmap;
     583        /** Physical address of the MSR bitmap (1 page). */
     584        RTHCPHYS                    pMSRBitmapPhys;
     585        /** Virtual address of the MSR bitmap (1 page). */
     586        R0PTRTYPE(uint8_t *)        pMSRBitmap;
     587
     588        /** R0 memory object for the guest MSR load area (1 page). */
     589        RTR0MEMOBJ                  pMemObjGuestMSR;
     590        /** Physical address of the guest MSR load area (1 page). */
     591        RTHCPHYS                    pGuestMSRPhys;
     592        /** Virtual address of the guest MSR load area (1 page). */
     593        R0PTRTYPE(uint8_t *)        pGuestMSR;
     594
     595        /** R0 memory object for the MSR load area (1 page). */
     596        RTR0MEMOBJ                  pMemObjHostMSR;
     597        /** Physical address of the MSR load area (1 page). */
     598        RTHCPHYS                    pHostMSRPhys;
     599        /** Virtual address of the MSR load area (1 page). */
     600        R0PTRTYPE(uint8_t *)        pHostMSR;
    587601
    588602        /** VMCS cache. */
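
The three per-VCPU pages added here back the physical addresses that HWVMXR0.cpp below writes into the VMCS: the MSR bitmap, a guest MSR area used both as the VM-entry MSR-load and the VM-exit MSR-store list, and a host MSR area used as the VM-exit MSR-load list. Each area is an array of 16-byte entries; the layout below is a sketch that matches how the PVMXMSR fields are accessed in the diff (the actual typedef lives in the VMX headers):

    #include <stdint.h>

    /* Sketch of one MSR-area entry (format per the Intel SDM). */
    typedef struct VMXMSR
    {
        uint32_t u32IndexMSR;   /* MSR number, e.g. MSR_K6_EFER (0xc0000080). */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Value loaded on entry/exit, or stored on exit. */
    } VMXMSR;
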
  • trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm

    r15962 r22040  
    232232    ; *
    233233    ; */
    234 
    235     ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    236     ;; @todo use the automatic load feature for MSRs
    237     LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    238 %if 0  ; not supported on Intel CPUs
    239     LOADGUESTMSR MSR_K8_CSTAR,          CPUMCTX.msrCSTAR
    240 %endif
    241     LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    242     LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    243     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    244 
    245234%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    246235    mov     qword [rbx + VMCSCACHE.uPos], 5
     
    308297
    309298    pop     rsi         ; pCtx (needed in rsi by the macros below)
    310 
    311     ;; @todo use the automatic load feature for MSRs
    312     SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    313299
    314300%ifdef VMX_USE_CACHED_VMCS_ACCESSES
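
The lines removed from the 64-bit guest switcher above were the manual per-MSR transfers: writing the guest LSTAR, STAR, SF_MASK and KERNEL_GS_BASE before entering the guest and reading KERNEL_GS_BASE back afterwards. With this changeset the CPU performs those transfers itself from the VM-entry MSR-load and VM-exit MSR-store areas. In C terms the removed macros amounted to roughly the following (a sketch using IPRT's ASMWrMsr/ASMRdMsr; the function name is invented, and the MSR_K8_*/MSR_K6_* constants and CPUMCTX come from the usual VBox headers):

    #include <iprt/asm.h>    /* ASMRdMsr, ASMWrMsr */
    #include <VBox/cpum.h>   /* CPUMCTX */

    /* Sketch: roughly what the removed LOADGUESTMSR/SAVEGUESTMSR macros did. */
    static void vmxSketchManualGuestMsrs(PCPUMCTX pCtx, bool fBeforeEntry)
    {
        if (fBeforeEntry)
        {
            ASMWrMsr(MSR_K8_LSTAR,          pCtx->msrLSTAR);
            ASMWrMsr(MSR_K6_STAR,           pCtx->msrSTAR);
            ASMWrMsr(MSR_K8_SF_MASK,        pCtx->msrSFMASK);
            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
        }
        else
            pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    }
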
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac

    r20996 r22040  
    414414%endif
    415415
    416     ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    417     ;; @todo use the automatic load feature for MSRs
    418     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    419 %if 0  ; not supported on Intel CPUs
    420     LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    421 %endif
    422     LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    423     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    424     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    425 
    426416    ; Save the pCtx pointer
    427417    push    xSI
     
    531521    pop     xSI         ; pCtx (needed in rsi by the macros below)
    532522
    533     ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    534     ;; @todo use the automatic load feature for MSRs
    535     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    536     LOADHOSTMSR MSR_K8_SF_MASK
    537     LOADHOSTMSR MSR_K6_STAR
    538 %if 0  ; not supported on Intel CPUs
    539     LOADHOSTMSR MSR_K8_CSTAR
    540 %endif
    541     LOADHOSTMSR MSR_K8_LSTAR
    542 
    543523%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    544524    pop     xDX         ; saved pCache
     
    589569    pop     xSI         ; pCtx (needed in rsi by the macros below)
    590570
    591     ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    592     ;; @todo use the automatic load feature for MSRs
    593     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    594     LOADHOSTMSR MSR_K8_SF_MASK
    595     LOADHOSTMSR MSR_K6_STAR
    596 %if 0  ; not supported on Intel CPUs
    597     LOADHOSTMSR MSR_K8_CSTAR
    598 %endif
    599     LOADHOSTMSR MSR_K8_LSTAR
    600 
    601571%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    602572    add     xSP, xS     ; pCache
     
    622592
    623593    pop     xSI         ; pCtx (needed in rsi by the macros below)
    624 
    625     ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    626     ;; @todo use the automatic load feature for MSRs
    627     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    628     LOADHOSTMSR MSR_K8_SF_MASK
    629     LOADHOSTMSR MSR_K6_STAR
    630 %if 0  ; not supported on Intel CPUs
    631     LOADHOSTMSR MSR_K8_CSTAR
    632 %endif
    633     LOADHOSTMSR MSR_K8_LSTAR
    634594
    635595%ifdef VMX_USE_CACHED_VMCS_ACCESSES
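
The ring-0 switcher loses the matching host side as well: instead of LOADHOSTMSR/LOADHOSTMSREX restoring the saved host values with wrmsr after the VM-exit, VMXR0SaveHostState() in HWVMXR0.cpp below records the host STAR, EFER, LSTAR, SF_MASK and KERNEL_GS_BASE in the VM-exit MSR-load area and the CPU restores them on every exit. The per-entry bookkeeping there boils down to something like this sketch (function name invented; VMXMSR as sketched earlier):

    #include <iprt/asm.h>    /* ASMRdMsr */

    /* Sketch: append one host MSR, with its current value, to the VM-exit
     * MSR-load area and return the new entry count. */
    static unsigned vmxSketchAddHostMsr(VMXMSR *paEntries, unsigned cEntries, uint32_t idMsr)
    {
        paEntries[cEntries].u32IndexMSR = idMsr;
        paEntries[cEntries].u32Reserved = 0;
        paEntries[cEntries].u64Value    = ASMRdMsr(idMsr);  /* current host value */
        return cEntries + 1;
    }
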
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r21951 r22040  
    8383static bool vmxR0IsValidWriteField(uint32_t idxField);
    8484#endif
     85static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
    8586
    8687static void VMXR0CheckError(PVM pVM, PVMCPU pVCpu, int rc)
     
    198199    }
    199200
    200     /* Allocate the MSR bitmap if this feature is supported. */
    201     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    202     {
    203         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    204         AssertRC(rc);
    205         if (RT_FAILURE(rc))
    206             return rc;
    207 
    208         pVM->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjMSRBitmap);
    209         pVM->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
    210         memset(pVM->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
    211     }
    212 
    213201#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    214202    {
     
    256244        pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0);
    257245        ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE);
     246
     247        /* Allocate the MSR bitmap if this feature is supported. */
     248        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     249        {
     250            rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     251            AssertRC(rc);
     252            if (RT_FAILURE(rc))
     253                return rc;
     254
     255            pVCpu->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);
     256            pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
     257            memset(pVCpu->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
     258        }
     259
     260        /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
     261        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     262        AssertRC(rc);
     263        if (RT_FAILURE(rc))
     264            return rc;
     265
     266        pVCpu->hwaccm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);
     267        pVCpu->hwaccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);
     268        memset(pVCpu->hwaccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
     269
     270        /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
     271        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
     272        AssertRC(rc);
     273        if (RT_FAILURE(rc))
     274            return rc;
     275
     276        pVCpu->hwaccm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);
     277        pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
     278        memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
    258279
    259280        /* Current guest paging mode. */
     
    294315            pVCpu->hwaccm.s.vmx.pVAPICPhys   = 0;
    295316        }
     317        if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
     318        {
     319            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, false);
     320            pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     321            pVCpu->hwaccm.s.vmx.pMSRBitmap       = 0;
     322            pVCpu->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
     323        }
     324        if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
     325        {
     326            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, false);
     327            pVCpu->hwaccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
     328            pVCpu->hwaccm.s.vmx.pHostMSR       = 0;
     329            pVCpu->hwaccm.s.vmx.pHostMSRPhys   = 0;
     330        }
     331        if (pVCpu->hwaccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
     332        {
     333            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, false);
     334            pVCpu->hwaccm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
     335            pVCpu->hwaccm.s.vmx.pGuestMSR       = 0;
     336            pVCpu->hwaccm.s.vmx.pGuestMSRPhys   = 0;
     337        }
    296338    }
    297339    if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
     
    301343        pVM->hwaccm.s.vmx.pAPIC       = 0;
    302344        pVM->hwaccm.s.vmx.pAPICPhys   = 0;
    303     }
    304     if (pVM->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    305     {
    306         RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, false);
    307         pVM->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    308         pVM->hwaccm.s.vmx.pMSRBitmap       = 0;
    309         pVM->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
    310345    }
    311346#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    395430            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
    396431
    397 #ifdef VBOX_WITH_VTX_MSR_BITMAPS
    398432        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    399433        {
    400             Assert(pVM->hwaccm.s.vmx.pMSRBitmapPhys);
     434            Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    401435            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
    402436        }
    403 #endif
    404437
    405438        /* We will use the secondary control if it's present. */
     
    480513        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    481514        {
    482             /* Optional */
    483             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVM->hwaccm.s.vmx.pMSRBitmapPhys);
     515            Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
     516
     517            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    484518            AssertRC(rc);
    485         }
    486 
    487         /* Clear MSR controls. */
    488         rc  = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
    489         rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
    490         rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
    491         rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
    492         rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
    493         AssertRC(rc);
    494 
     519
     520            /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
     521            vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
     522            vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
     523            vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
     524        }
     525
     526        /* Set the guest & host MSR load/store physical addresses. */
     527        Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     528        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     529        AssertRC(rc);
     530        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     531        AssertRC(rc);
     532
     533        Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
     534        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pHostMSRPhys);
     535        AssertRC(rc);
     536       
    495537        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    496538        {
     
    610652    VMXR0CheckError(pVM, &pVM->aCpus[0], rc);
    611653    return rc;
     654}
     655
     656/**
     657 * Sets the permission bits for the specified MSR
     658 *
     659 * @param   pVCpu       The VMCPU to operate on.
     660 * @param   ulMSR       MSR value
     661 * @param   fRead       Reading allowed/disallowed
     662 * @param   fWrite      Writing allowed/disallowed
     663 */
     664static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
     665{
     666    unsigned ulBit;
     667    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
     668
     669    /* Layout:
     670     * 0x000 - 0x3ff - Low MSR read bits
     671     * 0x400 - 0x7ff - High MSR read bits
     672     * 0x800 - 0xbff - Low MSR write bits
     673     * 0xc00 - 0xfff - High MSR write bits
     674     */
     675    if (ulMSR <= 0x00001FFF)
     676    {
     677        /* Pentium-compatible MSRs */
     678        ulBit    = ulMSR;
     679    }
     680    else
     681    if (    ulMSR >= 0xC0000000
     682        &&  ulMSR <= 0xC0001FFF)
     683    {
     684        /* AMD Sixth Generation x86 Processor MSRs */
     685        ulBit = (ulMSR - 0xC0000000);
     686        pMSRBitmap += 0x400;
     687    }
     688    else
     689    {
     690        AssertFailed();
     691        return;
     692    }
     693
     694    Assert(ulBit <= 0x1fff);
     695    if (fRead)
     696        ASMBitClear(pMSRBitmap, ulBit);
     697    else
     698        ASMBitSet(pMSRBitmap, ulBit);
     699   
     700    if (fWrite)
     701        ASMBitClear(pMSRBitmap + 0x800, ulBit);
     702    else
     703        ASMBitSet(pMSRBitmap + 0x800, ulBit);
    612704}
    613705
     
    9921084        }
    9931085
    994 
    9951086        /* Save the base address of the TR selector. */
    9961087        if (SelTR > gdtr.cbGdt)
     
    10671158        AssertRC(rc);
    10681159
    1069 #if 0 /* @todo deal with 32/64 */
    1070         /* Restore the host EFER - on CPUs that support it. */
    1071         if (pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
    1072         {
    1073             uint64_t msrEFER = ASMRdMsr(MSR_IA32_EFER);
    1074             rc = VMXWriteVMCS64(VMX_VMCS_HOST_FIELD_EFER_FULL, msrEFER);
    1075             AssertRC(rc);
    1076         }
    1077 #endif
     1160        /* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the world switch back to the host. */
     1161        PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
     1162        unsigned idxMsr = 0;
     1163
     1164        /* EFER MSR present? */
     1165        if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
     1166        {
     1167            if (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP)
     1168            {
     1169                pMsr->u32IndexMSR = MSR_K6_STAR;
     1170                pMsr->u32Reserved = 0;
     1171                pMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);                   /* legacy syscall eip, cs & ss */
     1172                pMsr++; idxMsr++;
     1173            }
     1174
     1175            pMsr->u32IndexMSR = MSR_K6_EFER;
     1176            pMsr->u32Reserved = 0;
     1177#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1178            if (CPUMIsGuestInLongMode(pVCpu))
     1179            {
     1180                /* Must match the efer value in our 64 bits switcher. */
     1181                pMsr->u64Value    = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
     1182            }
     1183            else
     1184#endif
     1185                pMsr->u64Value    = ASMRdMsr(MSR_K6_EFER);
     1186            pMsr++; idxMsr++;
     1187        }
     1188
     1189#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1190        if (VMX_IS_64BIT_HOST_MODE())
     1191        {
     1192            pMsr->u32IndexMSR = MSR_K8_LSTAR;
     1193            pMsr->u32Reserved = 0;
     1194            pMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);             /* 64 bits mode syscall rip */
     1195            pMsr++; idxMsr++;
     1196            pMsr->u32IndexMSR = MSR_K8_SF_MASK;
     1197            pMsr->u32Reserved = 0;
     1198            pMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
     1199            pMsr++; idxMsr++;
     1200            pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
     1201            pMsr->u32Reserved = 0;
     1202            pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
     1203            pMsr++; idxMsr++;
     1204        }
     1205#endif
     1206        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
     1207        AssertRC(rc);
     1208
    10781209        pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
    10791210    }
     
    11751306    /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
    11761307    val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
    1177 #if 0 /* @todo deal with 32/64 */
    1178     /* Required for the EFER write below, not supported on all CPUs. */
    1179     val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR;
    1180 #endif
    11811308    /* 64 bits guest mode? */
    11821309    if (CPUMIsGuestInLongModeEx(pCtx))
     
    11951322
    11961323    /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
    1197 #if 0 /* @todo deal with 32/64 */
    1198     val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG | VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR;
    1199 #else
    12001324    val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
    1201 #endif
    12021325
    12031326#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     
    17081831    }
    17091832
    1710 #if 0 /* @todo deal with 32/64 */
    1711     /* Unconditionally update the guest EFER - on CPUs that supports it. */
    1712     if (pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
    1713     {
    1714         rc = VMXWriteVMCS64(VMX_VMCS_GUEST_EFER_FULL, pCtx->msrEFER);
    1715         AssertRC(rc);
    1716     }
    1717 #endif
    1718 
    17191833    vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);
     1834
     1835    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     1836    {
     1837        /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
     1838        vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
     1839        vmxR0SetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
     1840        vmxR0SetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
     1841        vmxR0SetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
     1842    }
     1843
     1844    /* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
     1845    PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     1846    unsigned idxMsr = 0;
     1847
     1848    pMsr->u32IndexMSR = MSR_K6_EFER;
     1849    pMsr->u32Reserved = 0;
     1850    pMsr->u64Value    = pCtx->msrEFER;
     1851    /* VT-x will complain if only MSR_K6_EFER_LME is set. */
     1852    if (!CPUMIsGuestInLongModeEx(pCtx))
     1853        pMsr->u64Value &= ~(MSR_K6_EFER_LMA|MSR_K6_EFER_LME);
     1854
     1855    pMsr++; idxMsr++;
     1856    pMsr->u32IndexMSR = MSR_K8_LSTAR;
     1857    pMsr->u32Reserved = 0;
     1858    pMsr->u64Value    = pCtx->msrLSTAR;           /* 64 bits mode syscall rip */
     1859    pMsr++; idxMsr++;
     1860    pMsr->u32IndexMSR = MSR_K6_STAR;
     1861    pMsr->u32Reserved = 0;
     1862    pMsr->u64Value    = pCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
     1863    pMsr++; idxMsr++;
     1864    pMsr->u32IndexMSR = MSR_K8_SF_MASK;
     1865    pMsr->u32Reserved = 0;
     1866    pMsr->u64Value    = pCtx->msrSFMASK;          /* syscall flag mask */
     1867    pMsr++; idxMsr++;
     1868    pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
     1869    pMsr->u32Reserved = 0;
     1870    pMsr->u64Value    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
     1871    pMsr++; idxMsr++;
     1872
     1873    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
     1874    AssertRC(rc);
     1875
     1876    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, idxMsr);
     1877    AssertRC(rc);
    17201878
    17211879    /* Done. */
     
    18472005        /* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
    18482006        VMX_READ_SELREG(TR, tr);
     2007    }
     2008
     2009    uint32_t maxMsr = 0;
     2010    rc = VMXReadVMCS32(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, &maxMsr);
     2011    AssertRC(rc);
     2012
     2013    /* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
     2014    for (unsigned i = 0; i < maxMsr; i++)
     2015    {
     2016        PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     2017        pMsr += i;
     2018
     2019        switch (pMsr->u32IndexMSR)
     2020        {
     2021        case MSR_K8_LSTAR:
     2022            pCtx->msrLSTAR = pMsr->u64Value;
     2023            break;
     2024        case MSR_K6_STAR:
     2025            pCtx->msrSTAR = pMsr->u64Value;
     2026            break;
     2027        case MSR_K8_SF_MASK:
     2028            pCtx->msrSFMASK = pMsr->u64Value;
     2029            break;
     2030        case MSR_K8_KERNEL_GS_BASE:
     2031            pCtx->msrKERNELGSBASE = pMsr->u64Value;
     2032            break;
     2033        case MSR_K6_EFER:
     2034            /* EFER can't be changed without causing a VM-exit. */
     2035//            Assert(pCtx->msrEFER == pMsr->u64Value);
     2036            break;
     2037        default:
     2038            AssertFailed();
     2039            return VERR_INTERNAL_ERROR;
     2040        }
    18492041    }
    18502042    return VINF_SUCCESS;
     
    20412233#ifdef VBOX_STRICT
    20422234    RTCPUID     idCpuCheck;
     2235    bool        fWasInLongMode = false;
    20432236#endif
    20442237#ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
     
    21232316            Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
    21242317    }
     2318    fWasInLongMode = CPUMIsGuestInLongMode(pVCpu);
    21252319#endif
    21262320
     
    21402334               (int)pVCpu->hwaccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
    21412335    Assert(!HWACCMR0SuspendPending());
     2336    /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
     2337    Assert(fWasInLongMode == CPUMIsGuestInLongMode(pVCpu));
    21422338
    21432339    /* Safety precaution; looping for too long here can have a very bad effect on the host */
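
The new vmxR0SetMSRPermission() in HWVMXR0.cpp is what ties the two mechanisms together: the MSRs that are now loaded and stored automatically are also cleared in the intercept bitmap so the guest can access them freely. If one of them ever needs to be intercepted again, the same helper can set the bits back, for example (a hypothetical call, not part of this changeset; it would have to live in HWVMXR0.cpp since the helper is static):

    /* Sketch: trap guest writes to LSTAR again while still letting reads
     * through without a VM-exit. */
    static void vmxSketchTrapLstarWrites(PVMCPU pVCpu)
    {
        vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true /* fRead */, false /* fWrite */);
    }
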