VirtualBox Changeset r49664

Timestamp: Nov 26, 2013 3:53:35 PM
Author: vboxsync
Message: VMM/HMVMXR0: MSR optimizations for world-switch of 64-bit guests.

File: 1 edited
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
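
Background for the diff below: prior to this change, the syscall-related MSRs of 64-bit guests (LSTAR, STAR, SF_MASK and KERNEL_GS_BASE) were swapped through the VMCS auto-load/store area on every VM-entry and VM-exit. The new code swaps them lazily on 64-bit hosts instead: the host values are read once, the guest values are written with a plain WRMSR only when a swap is actually needed, and the host values are restored only when leaving VT-x. The following is a minimal sketch of that per-MSR bookkeeping with illustrative names (rdmsr64/wrmsr64 stand in for the ASMRdMsr/ASMWrMsr wrappers; the real code tracks all four MSRs in the fRestoreHostMsrs flag field and requires preemption to be disabled), not the actual VirtualBox implementation:

    /* Minimal sketch of lazy MSR swapping (illustrative names and types). */
    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed wrappers around RDMSR/WRMSR (ASMRdMsr/ASMWrMsr in VirtualBox). */
    extern uint64_t rdmsr64(uint32_t uMsr);
    extern void     wrmsr64(uint32_t uMsr, uint64_t uValue);

    typedef struct LAZYMSR
    {
        uint32_t uMsr;          /* MSR index, e.g. 0xC0000082 for LSTAR.      */
        uint64_t uHostValue;    /* Host value captured before the first swap. */
        bool     fGuestLoaded;  /* True while the guest value is on the CPU.  */
    } LAZYMSR;

    /* Capture the host value; skipped if the guest value is already loaded. */
    static void lazyMsrSaveHost(LAZYMSR *pMsr)
    {
        if (!pMsr->fGuestLoaded)
            pMsr->uHostValue = rdmsr64(pMsr->uMsr);
    }

    /* Load the guest value, skipping the WRMSR when no swap has happened yet
       and the guest value already equals the saved host value. */
    static void lazyMsrLoadGuest(LAZYMSR *pMsr, uint64_t uGuestValue)
    {
        if (pMsr->fGuestLoaded || uGuestValue != pMsr->uHostValue)
            wrmsr64(pMsr->uMsr, uGuestValue);
        pMsr->fGuestLoaded = true;
    }

    /* On leaving VT-x: read the (possibly modified) guest value back into the
       guest context, then restore the host value, but only if a swap happened. */
    static void lazyMsrRestoreHost(LAZYMSR *pMsr, uint64_t *puGuestValue)
    {
        if (pMsr->fGuestLoaded)
        {
            *puGuestValue      = rdmsr64(pMsr->uMsr);
            wrmsr64(pMsr->uMsr, pMsr->uHostValue);
            pMsr->fGuestLoaded = false;
        }
    }

Compared with the auto-load/store area, which makes the CPU swap every listed MSR on each VM-entry and VM-exit, this avoids the MSR writes entirely whenever host and guest hold identical values; the price is the extra bookkeeping, the MSR-bitmap pass-through permissions and the WRMSR-exit consistency checks visible in the hunks below. Note also the ordering enforced on the leave path: the guest values must be read back from the CPU (hmR0VmxSaveGuestLazyMsrs()) before the host values are restored, otherwise guest writes to the pass-through MSRs would be lost.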

    r49580 r49664  
    8989#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
    9090#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(15)
    91 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(16)
    92 #define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(17)
     91#define HMVMX_UPDATED_GUEST_LAZY_MSRS             RT_BIT(16)
     92#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(17)
     93#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(18)
    9394#define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP                   \
    9495                                                   | HMVMX_UPDATED_GUEST_RSP                   \
     
    107108                                                   | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      \
    108109                                                   | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  \
     110                                                   | HMVMX_UPDATED_GUEST_LAZY_MSRS             \
    109111                                                   | HMVMX_UPDATED_GUEST_ACTIVITY_STATE        \
    110112                                                   | HMVMX_UPDATED_GUEST_APIC_STATE)
     
    13761378 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
    13771379 *
    1378  * @param   pVCpu           Pointer to the VMCPU.
     1380 * @param   pVCpu       Pointer to the VMCPU.
     1381 *
     1382 * @remarks No-long-jump zone!!!
    13791383 */
    13801384static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
     
    13951399
    13961400
     1401#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1402/**
     1403 * Saves a set of host MSRs to allow read/write passthru access to the guest and
     1404 * perform lazy restoration of the host MSRs while leaving VT-x.
     1405 *
     1406 * @param   pVCpu       Pointer to the VMCPU.
     1407 *
     1408 * @remarks No-long-jump zone!!!
     1409 */
     1410static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
     1411{
     1412    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1413
     1414#define VMXLOCAL_SAVE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
     1415    do { \
     1416        if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag))) \
     1417        { \
     1418            pVCpu->hm.s.vmx.u64Host##a_HostMsrField = ASMRdMsr(uMsr); \
     1419            Log4(("hmR0VmxLazySaveHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
     1420        } \
     1421    } while (0)
     1422
     1423    /*
     1424     * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
     1425     */
     1426    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
     1427    VMXLOCAL_SAVE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
     1428    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
     1429    VMXLOCAL_SAVE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
     1430#undef VMXLOCAL_SAVE_HOST_MSR
     1431}
     1432
     1433
     1434/**
     1435 * Checks whether the MSR belongs to the set of guest MSRs that we restore
     1436 * lazily while leaving VT-x.
     1437 *
     1438 * @returns true if it does, false otherwise.
     1439 * @param   pVCpu       Pointer to the VMCPU.
     1440 * @param   uMsr        The MSR to check.
     1441 */
     1442static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
     1443{
     1444    NOREF(pVCpu);
     1445    switch (uMsr)
     1446    {
     1447        case MSR_K8_LSTAR:
     1448        case MSR_K6_STAR:
     1449        case MSR_K8_SF_MASK:
     1450        case MSR_K8_KERNEL_GS_BASE:
     1451            return true;
     1452    }
     1453    return false;
     1454}
     1455
     1456
     1457/**
     1458 * Saves a set of guests MSRs back into the guest-CPU context.
     1459 *
     1460 * @param   pVCpu       Pointer to the VMCPU.
     1461 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     1462 *                      out-of-sync. Make sure to update the required fields
     1463 *                      before using them.
     1464 *
     1465 * @remarks No-long-jump zone!!!
     1466 */
     1467static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     1468{
     1469    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1470#define VMXLOCAL_SAVE_GUEST_MSR(uMsr, a_GuestMsrField, RestoreFlag) \
     1471    do { \
     1472        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
     1473        { \
     1474            pMixedCtx->msr##a_GuestMsrField = ASMRdMsr(uMsr); \
     1475            Log4(("hmR0VmxLazySaveGuestMsrs: uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
     1476        } \
     1477    } while (0)
     1478
     1479    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        VMX_RESTORE_HOST_MSR_LSTAR);
     1480    VMXLOCAL_SAVE_GUEST_MSR(MSR_K6_STAR,           STAR,         VMX_RESTORE_HOST_MSR_STAR);
     1481    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       VMX_RESTORE_HOST_MSR_SFMASK);
     1482    VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
     1483
     1484#undef VMXLOCAL_SAVE_GUEST_MSR
     1485}
     1486
     1487
     1488/**
     1489 * Loads a set of guests MSRs to allow read/passthru to the guest.
     1490 *
     1491 * The name of this function is slightly confusing. This function does NOT
     1492 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
     1493 * common prefix for functions dealing with "lazy restoration" of the shared
     1494 * MSRs.
     1495 *
     1496 * @param   pVCpu       Pointer to the VMCPU.
     1497 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     1498 *                      out-of-sync. Make sure to update the required fields
     1499 *                      before using them.
     1500 *
     1501 * @remarks No-long-jump zone!!!
     1502 */
     1503static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     1504{
     1505    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1506
     1507#define VMXLOCAL_LOAD_GUEST_MSR(uMsr, a_GuestMsrField, a_HostMsrField, RestoreFlag) \
     1508    do { \
     1509        if (   (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
     1510            || pMixedCtx->msr##a_GuestMsrField != pVCpu->hm.s.vmx.u64Host##a_HostMsrField) \
     1511        { \
     1512            ASMWrMsr((uMsr), pMixedCtx->msr##a_GuestMsrField); \
     1513        } \
     1514        pVCpu->hm.s.vmx.fRestoreHostMsrs |= (RestoreFlag); \
     1515        Log4(("Load: MSRSWAP uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
     1516    } while (0)
     1517
     1518    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
     1519    VMXLOCAL_LOAD_GUEST_MSR(MSR_K6_STAR,           STAR,         StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
     1520    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
     1521    VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
     1522
     1523#undef VMXLOCAL_LOAD_GUEST_MSR
     1524}
     1525
     1526
     1527/**
     1528 * Performs lazy restoration of the set of host MSRs if they were previously
     1529 * loaded with guest MSR values.
     1530 *
     1531 * @param   pVCpu       Pointer to the VMCPU.
     1532 *
     1533 * @remarks No-long-jump zone!!!
     1534 * @remarks The guest MSRs should have been saved back into the guest-CPU
     1535 *          context by hmR0VmxSaveGuestLazyMsrs()!!!
     1536 */
     1537static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
     1538{
     1539    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1540
     1541#define VMXLOCAL_RESTORE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
     1542    do { \
     1543        if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
     1544        { \
     1545            ASMWrMsr((uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField); \
     1546            pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(RestoreFlag); \
     1547            Log4(("hmR0VmxLazyRestoreHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), \
     1548                 pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
     1549        } \
     1550    } while (0)
     1551
     1552    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
     1553    VMXLOCAL_RESTORE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
     1554    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
     1555    VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
     1556
     1557#undef VMXLOCAL_RESTORE_HOST_MSR
     1558}
     1559#endif  /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
     1560
     1561
    13971562#ifdef VBOX_STRICT
    13981563/**
     
    14001565 * VMCS are correct.
    14011566 *
    1402  * @param   pVCpu           Pointer to the VMCPU.
     1567 * @param   pVCpu       Pointer to the VMCPU.
    14031568 */
    14041569static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
     
    21712336        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    21722337        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     2338
     2339#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     2340        /*
     2341         * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
     2342         */
     2343        if (   HMVMX_IS_64BIT_HOST_MODE()
     2344            && pVM->hm.s.fAllow64BitGuests)
     2345        {
     2346            hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     2347            hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     2348            hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     2349            hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     2350        }
     2351#endif
    21732352    }
    21742353
     
    28233002 * @param   pVM         Pointer to the VM.
    28243003 * @param   pVCpu       Pointer to the VMCPU.
     3004 *
     3005 * @remarks No-long-jump zone!!!
    28253006 */
    28263007DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
     
    28323013
    28333014    int rc = VINF_SUCCESS;
    2834 #if 0
    2835     PVMXAUTOMSR  pHostMsr       = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    2836     uint32_t     cHostMsrs      = 0;
    2837     uint32_t     u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    2838 
    2839     if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    2840     {
    2841         uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
    2842 
    2843 # if HC_ARCH_BITS == 64
    2844         /* Paranoia. 64-bit code requires these bits to be set always. */
    2845         Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
    2846 
    2847         /*
    2848          * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
    2849          * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
    2850          * some reason (e.g. allow transparent reads) we would activate the code below.
    2851          */
    2852 #  if 0
    2853         /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
    2854         Assert(u64HostEfer & (MSR_K6_EFER_NXE));
    2855         /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
    2856            See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
    2857         if (CPUMIsGuestInLongMode(pVCpu))
    2858         {
    2859             uint64_t u64GuestEfer;
    2860             rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
    2861             AssertRC(rc);
    2862 
    2863             if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
    2864             {
    2865                 pHostMsr->u32Msr      = MSR_K6_EFER;
    2866                 pHostMsr->u32Reserved = 0;
    2867                 pHostMsr->u64Value    = u64HostEfer;
    2868                 pHostMsr++; cHostMsrs++;
    2869             }
    2870         }
    2871 #  endif
    2872 # else  /* HC_ARCH_BITS != 64 */
    2873         pHostMsr->u32Msr      = MSR_K6_EFER;
    2874         pHostMsr->u32Reserved = 0;
    2875 # if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    2876         if (CPUMIsGuestInLongMode(pVCpu))
    2877         {
    2878             /* Must match the EFER value in our 64-bit switcher. */
    2879             pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
    2880         }
    2881         else
    2882 #  endif
    2883             pHostMsr->u64Value = u64HostEfer;
    2884         pHostMsr++; cHostMsrs++;
    2885 # endif  /* HC_ARCH_BITS == 64 */
    2886     }
    2887 
    2888 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    2889     if (HMVMX_IS_64BIT_HOST_MODE())
    2890     {
    2891         pHostMsr->u32Msr       = MSR_K6_STAR;
    2892         pHostMsr->u32Reserved  = 0;
    2893         pHostMsr->u64Value     = ASMRdMsr(MSR_K6_STAR);              /* legacy syscall eip, cs & ss */
    2894         pHostMsr++; cHostMsrs++;
    2895         pHostMsr->u32Msr       = MSR_K8_LSTAR;
    2896         pHostMsr->u32Reserved  = 0;
    2897         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64-bit mode syscall rip */
    2898         pHostMsr++; cHostMsrs++;
    2899         pHostMsr->u32Msr       = MSR_K8_SF_MASK;
    2900         pHostMsr->u32Reserved  = 0;
    2901         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
    2902         pHostMsr++; cHostMsrs++;
    2903         pHostMsr->u32Msr      = MSR_K8_KERNEL_GS_BASE;
    2904         pHostMsr->u32Reserved = 0;
    2905         pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);     /* swapgs exchange value */
    2906         pHostMsr++; cHostMsrs++;
    2907     }
    2908 # endif
    2909 
    2910     /* Host TSC AUX MSR must be restored since we always load/store guest TSC AUX MSR. */
    2911     if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    2912     {
    2913         pHostMsr->u32Msr      = MSR_K8_TSC_AUX;
    2914         pHostMsr->u32Reserved = 0;
    2915         pHostMsr->u64Value    = ASMRdMsr(MSR_K8_TSC_AUX);
    2916         pHostMsr++; cHostMsrs++;
    2917     }
    2918 
    2919     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    2920     if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc)))
    2921     {
    2922         LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc)));
    2923         pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE;
    2924         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2925     }
    2926 
    2927     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
     3015#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     3016    if (   HMVMX_IS_64BIT_HOST_MODE()
     3017        && pVM->hm.s.fAllow64BitGuests)
     3018    {
     3019        hmR0VmxLazySaveHostMsrs(pVCpu);
     3020    }
    29283021#endif
    29293022
     
    43594452
    43604453    /*
    4361      * Shared MSRs that we use the auto-load/store MSR area in the VMCS.
     4454     * MSRs that we use the auto-load/store MSR area in the VMCS.
    43624455     */
    4363     int rc = VINF_SUCCESS;
     4456    PVM pVM = pVCpu->CTX_SUFF(pVM);
    43644457    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
    43654458    {
    4366         /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
    4367         PVM pVM = pVCpu->CTX_SUFF(pVM);
    4368         bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
    4369         if (fSupportsLongMode)
    4370         {
     4459        if (pVM->hm.s.fAllow64BitGuests)
     4460        {
     4461#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    43714462            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false /* fUpdateHostMsr */);
    43724463            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false /* fUpdateHostMsr */);
    43734464            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false /* fUpdateHostMsr */);
    43744465            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
    4375         }
    4376 
    43774466# ifdef DEBUG
    4378         PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    4379         for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
    4380             Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
     4467            PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     4468            for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
     4469                Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
    43814470# endif
     4471#endif
     4472        }
    43824473        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    43834474    }
     
    43904481    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
    43914482    {
    4392         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);      AssertRCReturn(rc, rc);
     4483        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);      AssertRCReturn(rc, rc);
    43934484        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    43944485    }
     
    43964487    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
    43974488    {
    4398         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);    AssertRCReturn(rc, rc);
     4489        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);    AssertRCReturn(rc, rc);
    43994490        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    44004491    }
     
    44024493    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
    44034494    {
    4404         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);    AssertRCReturn(rc, rc);
     4495        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);    AssertRCReturn(rc, rc);
    44054496        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    44064497    }
    44074498
    4408     return rc;
     4499    return VINF_SUCCESS;
    44094500}
    44104501
     
    47054796                {
    47064797                    Log4(("MSR_K6_EFER            = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
     4798                    Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
     4799                    Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    47074800                    Log4(("MSR_K6_STAR            = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
    4708                     Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    4709                     Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    47104801                    Log4(("MSR_K8_SF_MASK         = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    47114802                    Log4(("MSR_K8_KERNEL_GS_BASE  = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
     
    57395830
    57405831/**
    5741  * Saves the auto load/store'd guest MSRs from the current VMCS into the
    5742  * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
    5743  * and TSC_AUX.
     5832 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
     5833 * the CPU back into the guest-CPU context.
    57445834 *
    57455835 * @returns VBox status code.
     
    57515841 * @remarks No-long-jump zone!!!
    57525842 */
     5843static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5844{
     5845    if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS)
     5846        return VINF_SUCCESS;
     5847
     5848#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     5849    if (   HMVMX_IS_64BIT_HOST_MODE()
     5850        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
     5851    {
     5852        /* We should not get preempted to a different CPU at this point while reading the MSRs. */
     5853        VMMRZCallRing3Disable(pVCpu);
     5854        HM_DISABLE_PREEMPT_IF_NEEDED();
     5855
     5856        hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
     5857
     5858        HM_RESTORE_PREEMPT_IF_NEEDED();
     5859        VMMRZCallRing3Enable(pVCpu);
     5860    }
     5861#endif
     5862
     5863    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
     5864    return VINF_SUCCESS;
     5865}
     5866
     5867
     5868/**
     5869 * Saves the auto load/store'd guest MSRs from the current VMCS into
     5870 * the guest-CPU context.
     5871 *
     5872 * @returns VBox status code.
     5873 * @param   pVCpu       Pointer to the VMCPU.
     5874 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     5875 *                      out-of-sync. Make sure to update the required fields
     5876 *                      before using them.
     5877 *
     5878 * @remarks No-long-jump zone!!!
     5879 */
    57535880static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    57545881{
     
    57565883        return VINF_SUCCESS;
    57575884
    5758     PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    5759     Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs));
    5760     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
     5885    PVMXAUTOMSR pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     5886    uint32_t    cMsrs = pVCpu->hm.s.vmx.cMsrs;
     5887    Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
     5888    for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
    57615889    {
    57625890        switch (pMsr->u32Msr)
    57635891        {
     5892            case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
    57645893            case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;             break;
    57655894            case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;             break;
    57665895            case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value;             break;
    57675896            case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
    5768             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
    57695897            default:
    57705898            {
     
    61766304    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    61776305
     6306    rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
     6307    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     6308
    61786309    rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    61796310    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     
    64446575 *
    64456576 * @remarks If you modify code here, make sure to check whether
    6446  *          hmR0VmxCallRing3Callback() needs to be updated too.
     6577 *          hmR0VmxCallRing3Callback() needs to be updated too!!!
    64476578 * @remarks No-long-jmp zone!!!
    64486579 */
     
    64966627        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
    64976628        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     6629    }
     6630#endif
     6631
     6632#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     6633    /* Restore the host MSRs as we're leaving VT-x context. */
     6634    if (   HMVMX_IS_64BIT_HOST_MODE()
     6635        && pVM->hm.s.fAllow64BitGuests
     6636        && pVCpu->hm.s.vmx.fRestoreHostMsrs)
     6637    {
     6638        /* We shouldn't reload the guest MSRs without saving it first. */
     6639        if (!fSaveGuestState)
     6640        {
     6641            int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
     6642            AssertRCReturn(rc, rc);
     6643        }
     6644        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS);
     6645        hmR0VmxLazyRestoreHostMsrs(pVCpu);
     6646        Assert(!pVCpu->hm.s.vmx.fRestoreHostMsrs);
    64986647    }
    64996648#endif
     
    65106659    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    65116660
    6512     /** @todo This kinda defeats the purpose of having preemption hooks.
     6661    /** @todo This partially defeats the purpose of having preemption hooks.
    65136662     *  The problem is, deregistering the hooks should be moved to a place that
    65146663     *  lasts until the EMT is about to be destroyed not everytime while leaving HM
     
    66806829 *                          may be out-of-sync. Make sure to update the required
    66816830 *                          fields before using them.
     6831 *
    66826832 * @remarks If you modify code here, make sure to check whether
    6683  *          hmR0VmxLeave() needs to be updated too.
     6833 *          hmR0VmxLeave() needs to be updated too!!!
    66846834 */
    66856835DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
     
    66876837    if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
    66886838    {
     6839        /* If anything here asserts or fails, good luck. */
    66896840        VMMRZCallRing3RemoveNotification(pVCpu);
     6841        VMMRZCallRing3Disable(pVCpu);
    66906842        HM_DISABLE_PREEMPT_IF_NEEDED();
    66916843
    6692         /* If anything here asserts or fails, good luck. */
     6844        PVM pVM = pVCpu->CTX_SUFF(pVM);
    66936845        if (CPUMIsGuestFPUStateActive(pVCpu))
    6694             CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
     6846            CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
    66956847
    66966848        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
     
    67036855            VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
    67046856            pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     6857        }
     6858#endif
     6859
     6860#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     6861        /* Restore the host MSRs as we're leaving VT-x context. */
     6862        if (   HMVMX_IS_64BIT_HOST_MODE()
     6863            && pVM->hm.s.fAllow64BitGuests
     6864            && pVCpu->hm.s.vmx.fRestoreHostMsrs)
     6865        {
     6866            hmR0VmxLazyRestoreHostMsrs(pVCpu);
    67056867        }
    67066868#endif
     
    76537815    }
    76547816
     7817    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     7818    {
     7819#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     7820        if (   HMVMX_IS_64BIT_HOST_MODE()
     7821            && pVM->hm.s.fAllow64BitGuests)
     7822        {
     7823            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
     7824        }
     7825#endif
     7826        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
     7827    }
     7828
    76557829    AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
    76567830              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
     
    79078081
    79088082    /*
    7909      * Load the state shared between host and guest (FPU, debug).
     8083     * Load the state shared between host and guest (FPU, debug, lazy MSRs).
    79108084     */
    79118085    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
     
    80058179    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    80068180    {
    8007         if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    8008         {
    8009             /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area
    8010                if it could have changed without causing a VM-exit. */
    8011             if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    8012             {
    8013                 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    8014                 AssertRC(rc2);
    8015             }
    8016         }
    8017 
    80188181        /** @todo Find a way to fix hardcoding a guestimate.  */
    80198182        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
     
    97529915    rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    97539916    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
     9917    {
     9918        rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
    97549919        rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     9920    }
    97559921    AssertRCReturn(rc, rc);
    97569922    Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
     
    97949960    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    97959961    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
     9962    {
     9963        rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
    97969964        rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     9965    }
    97979966    AssertRCReturn(rc, rc);
    97989967    Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
     
    983210001                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
    983310002                        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     10003#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     10004                    else if (   HMVMX_IS_64BIT_HOST_MODE()
     10005                             && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
     10006                    {
     10007                        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
     10008                    }
     10009#endif
    983410010                    break;
    983510011                }
     
    985210028                }
    985310029
    9854                 /* Writes to MSRs that are part of the auto-load/store are shouldn't cause VM-exits
    9855                    when MSR-bitmaps are supported. */
     10030                /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
    985610031                default:
    985710032                {
     
    986210037                        HMVMX_RETURN_UNEXPECTED_EXIT();
    986310038                    }
     10039
     10040#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     10041                    if (   HMVMX_IS_64BIT_HOST_MODE()
     10042                        && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
     10043                    {
     10044                        AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
     10045                        HMVMX_RETURN_UNEXPECTED_EXIT();
     10046                    }
     10047#endif
    986410048                    break;
    986510049                }
     
    1008310267    if (fIOString)
    1008410268    {
    10085 #if 0       /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158}*/
     10269#if 0       /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
    1008610270        /*
    1008710271         * INS/OUTS - I/O String instruction.
     
    1047110655
    1047210656    /*
    10473      * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
    10474      * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update the segment registers and DR7 from the CPU.
     10657     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
     10658     * Update the segment registers and DR7 from the CPU.
    1047510659     */
    1047610660    rc  = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     
    1062610810    }
    1062710811
    10628     Log4(("EPT return to ring-3 rc=%d\n"));
     10812    Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
    1062910813    return rc;
    1063010814}
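
The hmR0VmxSetMsrPermission() calls added above for LSTAR, STAR, SF_MASK and KERNEL_GS_BASE rely on the VT-x MSR bitmap: a 4 KB page whose bits select which RDMSR/WRMSR accesses cause VM-exits (a set bit exits, a clear bit passes the access through to the guest). The sketch below shows the general bitmap layout as documented in the Intel SDM; msrBitmapSetPassthru() and its parameters are illustrative, not VirtualBox's implementation:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * The 4 KB MSR bitmap has four 1 KB quadrants (one bit per MSR):
     *   0x000: read  bitmap for MSRs 0x00000000..0x00001FFF
     *   0x400: read  bitmap for MSRs 0xC0000000..0xC0001FFF
     *   0x800: write bitmap for MSRs 0x00000000..0x00001FFF
     *   0xC00: write bitmap for MSRs 0xC0000000..0xC0001FFF
     */
    static void msrBitmapSetPassthru(uint8_t *pbBitmap, uint32_t uMsr, bool fRead, bool fWrite)
    {
        uint32_t offBase;
        if (uMsr <= 0x00001FFF)
            offBase = 0;                       /* low MSR range */
        else if (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF)
        {
            offBase = 0x400;                   /* high MSR range */
            uMsr   -= 0xC0000000;
        }
        else
            return;                            /* MSRs outside both ranges always exit */

        if (fRead)
            pbBitmap[offBase + uMsr / 8]         &= ~(uint8_t)(1 << (uMsr % 8));
        if (fWrite)
            pbBitmap[offBase + 0x800 + uMsr / 8] &= ~(uint8_t)(1 << (uMsr % 8));
    }

For example, clearing both the read and the write bit for MSR 0xC0000082 (LSTAR) corresponds to the VMXMSREXIT_PASSTHRU_READ/VMXMSREXIT_PASSTHRU_WRITE permissions set in the diff, after which guest accesses to that MSR no longer exit and the lazy save/restore code is what keeps host and guest values consistent.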