VirtualBox

Changeset 46294 in vbox for trunk/src/VBox/VMM


Timestamp: May 28, 2013 12:07:56 PM
Author:    vboxsync
Message:   VMM/HMVMXR0: Cleanup, and fixme todo.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r46289)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r46294)
@@ -2325,10 +2325,16 @@
         uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
 
-#if HC_ARCH_BITS == 64
+# if HC_ARCH_BITS == 64
         /* Paranoia. 64-bit code requires these bits to be set always. */
         Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
+
+        /*
+         * We currently do not save/restore host EFER, we just make sure it doesn't get modified by VT-x operation.
+         * All guest accesses (read, write) on EFER cause VM-exits. If we are to conditionally load the guest EFER for
+         * some reason (e.g. allow transparent reads) we would activate the code below.
+         */
+#  if 0
         /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
         Assert(u64HostEfer & (MSR_K6_EFER_NXE));
-
         /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
            See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
@@ -2347,5 +2353,6 @@
             }
         }
-#else  /* HC_ARCH_BITS != 64 */
+#  endif
+# else  /* HC_ARCH_BITS != 64 */
         pHostMsr->u32IndexMSR = MSR_K6_EFER;
         pHostMsr->u32Reserved = 0;
@@ -2357,8 +2364,8 @@
         }
         else
-# endif
+#  endif
             pHostMsr->u64Value = u64HostEfer;
         pHostMsr++; cHostMsrs++;
-#endif  /* HC_ARCH_BITS == 64 */
+# endif  /* HC_ARCH_BITS == 64 */
     }
 
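A note on the host-EFER hunks above: on VM-exit the CPU can reload host MSRs from an auto-load area of index/value pairs, which is what the pHostMsr entries build up. Below is a minimal, self-contained C sketch of that idea, not the HMVMXR0.cpp implementation: the MsrLoadEntry type and AddHostMsr() helper are hypothetical simplifications, and the EFER constants are restated here only for illustration.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified model of one VMX auto-load MSR entry
       (index + reserved + 64-bit value), as used for the host EFER above. */
    typedef struct MsrLoadEntry
    {
        uint32_t u32IndexMSR;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } MsrLoadEntry;

    #define MSR_K6_EFER       0xc0000080u
    #define MSR_K6_EFER_SCE   (UINT64_C(1) << 0)   /* syscall enable */
    #define MSR_K6_EFER_LME   (UINT64_C(1) << 8)   /* long mode enable */
    #define MSR_K6_EFER_LMA   (UINT64_C(1) << 10)  /* long mode active */
    #define MSR_K6_EFER_NXE   (UINT64_C(1) << 11)  /* no-execute enable */

    /* Append one host MSR to the load area; returns the new entry count. */
    static unsigned AddHostMsr(MsrLoadEntry *paMsrs, unsigned cMsrs,
                               uint32_t idMsr, uint64_t uValue)
    {
        paMsrs[cMsrs].u32IndexMSR = idMsr;
        paMsrs[cMsrs].u32Reserved = 0;
        paMsrs[cMsrs].u64Value    = uValue;
        return cMsrs + 1;
    }

    int main(void)
    {
        MsrLoadEntry aHostMsrs[8];
        unsigned     cHostMsrs   = 0;
        uint64_t     u64HostEfer = MSR_K6_EFER_SCE | MSR_K6_EFER_LME
                                 | MSR_K6_EFER_LMA | MSR_K6_EFER_NXE; /* stand-in for ASMRdMsr(MSR_K6_EFER) */

        /* Paranoia, mirroring the hunk: a 64-bit host must have LMA and LME set. */
        assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME))
                           == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));

        /* A 32-bit host instead adds EFER to the load area so the CPU
           restores it on VM-exit. */
        cHostMsrs = AddHostMsr(aHostMsrs, cHostMsrs, MSR_K6_EFER, u64HostEfer);

        printf("host MSR-load area has %u entry(ies)\n", cHostMsrs);
        return 0;
    }

On a 64-bit host the changeset keeps relying on EFER never being modified by VT-x operation (the conditional-save path stays under #if 0); the sketch's AddHostMsr() call corresponds to the 32-bit-host branch.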
     
@@ -2400,5 +2407,5 @@
     rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,        ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
     AssertRCReturn(rc, rc);
-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
@@ -2413,13 +2420,13 @@
         rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,     ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
-# elif HC_ARCH_BITS == 32
+#elif HC_ARCH_BITS == 32
     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,         ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,         ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
-# else
+#else
     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,         ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,         ASMRdMsr(MSR_IA32_SYSENTER_EIP));
-# endif
+#endif
     AssertRCReturn(rc, rc);
 
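The SYSENTER hunks above only re-level the preprocessor nesting, but the 32-bit/64-bit split they preserve is worth a remark: HOST_SYSENTER_ESP/EIP are natural-width VMCS fields, so a 32-bit host writes them with VMXWriteVmcs32 and a 64-bit host with VMXWriteVmcs64. A rough stand-alone sketch of the same compile-time selection follows; HOST_BITS, WriteVmcs32/64 and the dummy values are hypothetical stand-ins for HC_ARCH_BITS, the real VMCS writers and ASMRdMsr_Low()/ASMRdMsr().

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real VMCS writers. */
    static int WriteVmcs32(const char *pszField, uint32_t u32Val)
    {
        printf("%-20s <- %#010x\n", pszField, u32Val);
        return 0; /* VINF_SUCCESS */
    }

    static int WriteVmcs64(const char *pszField, uint64_t u64Val)
    {
        printf("%-20s <- %#018llx\n", pszField, (unsigned long long)u64Val);
        return 0;
    }

    /* HOST_BITS plays the role of HC_ARCH_BITS here. */
    #define HOST_BITS 64

    static int SaveHostSysenterMsrs(uint64_t uSysenterEsp, uint64_t uSysenterEip)
    {
        int rc;
    #if HOST_BITS == 32
        /* Natural-width host fields are 32 bits wide on a 32-bit host... */
        rc = WriteVmcs32("HOST_SYSENTER_ESP", (uint32_t)uSysenterEsp);
        if (rc != 0) return rc;
        rc = WriteVmcs32("HOST_SYSENTER_EIP", (uint32_t)uSysenterEip);
    #else
        /* ...and 64 bits wide on a 64-bit host. */
        rc = WriteVmcs64("HOST_SYSENTER_ESP", uSysenterEsp);
        if (rc != 0) return rc;
        rc = WriteVmcs64("HOST_SYSENTER_EIP", uSysenterEip);
    #endif
        return rc;
    }

    int main(void)
    {
        /* Dummy values in place of reading MSR_IA32_SYSENTER_ESP/EIP. */
        return SaveHostSysenterMsrs(0xffffffff81a00000ULL, 0xffffffff81a01230ULL);
    }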
     
@@ -3742,42 +3749,25 @@
 
         /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
-        const bool fSupportsNX       = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
-        const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
-        if (fSupportsNX || fSupportsLongMode)
-        {
-            /** @todo support save IA32_EFER, i.e.
-             *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, in which case the
-             *        guest EFER need not be part of the VM-entry MSR-load area. Also
-             *        allow the guest to read EFER without causing a VM-exit when
-             *        possible. */
-            /* Do -not- load guest EFER as we don't save/restore the host EFER always. See hmr0VmxSaveHostMsrs() */
-#if 0
-            pGuestMsr->u32IndexMSR = MSR_K6_EFER;
+        /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
+         *        when the guest really is in 64-bit mode. */
+        bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+        if (fSupportsLongMode)
+        {
+            pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
             pGuestMsr->u32Reserved = 0;
-            pGuestMsr->u64Value    = pMixedCtx->msrEFER;
-            /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
-            if (!CPUMIsGuestInLongModeEx(pMixedCtx))
-                pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
+            pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;           /* 64 bits mode syscall rip */
             pGuestMsr++; cGuestMsrs++;
-#endif
-            if (fSupportsLongMode)
-            {
-                pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
-                pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;           /* 64 bits mode syscall rip */
-                pGuestMsr++; cGuestMsrs++;
-                pGuestMsr->u32IndexMSR = MSR_K6_STAR;
-                pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pMixedCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
-                pGuestMsr++; cGuestMsrs++;
-                pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
-                pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
-                pGuestMsr++; cGuestMsrs++;
-                pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
-                pGuestMsr->u32Reserved = 0;
-                pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
-                pGuestMsr++; cGuestMsrs++;
-            }
+            pGuestMsr->u32IndexMSR = MSR_K6_STAR;
+            pGuestMsr->u32Reserved = 0;
+            pGuestMsr->u64Value    = pMixedCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
+            pGuestMsr++; cGuestMsrs++;
+            pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
+            pGuestMsr->u32Reserved = 0;
+            pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
+            pGuestMsr++; cGuestMsrs++;
+            pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+            pGuestMsr->u32Reserved = 0;
+            pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
+            pGuestMsr++; cGuestMsrs++;
         }
 
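The guest-MSR hunk above drops the disabled guest-EFER path and queues the 64-bit syscall MSRs (LSTAR, STAR, SF_MASK, KERNEL_GS_BASE) only when the guest CPUID reports long mode. For reference, the long-mode and NX bits behind the fSupportsLongMode/fSupportsNX checks live in CPUID leaf 0x80000001, EDX bits 29 and 20. The sketch below queries the host CPU for those bits with GCC/Clang's <cpuid.h> on x86; it only illustrates the leaf and bit positions, not CPUMGetGuestCpuIdFeature(), which reports the guest-visible CPUID instead.

    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* CPUID leaf 0x80000001, EDX: bit 20 = NX, bit 29 = Long Mode (LM). */
    #define CPUID_EXT_FEAT_LEAF   0x80000001u
    #define CPUID_EDX_NX_BIT      (1u << 20)
    #define CPUID_EDX_LM_BIT      (1u << 29)

    int main(void)
    {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid(CPUID_EXT_FEAT_LEAF, &eax, &ebx, &ecx, &edx))
        {
            fprintf(stderr, "extended CPUID leaf not supported\n");
            return 1;
        }

        bool fSupportsNX       = (edx & CPUID_EDX_NX_BIT) != 0;
        bool fSupportsLongMode = (edx & CPUID_EDX_LM_BIT) != 0;
        printf("NX: %s, Long Mode: %s\n",
               fSupportsNX       ? "yes" : "no",
               fSupportsLongMode ? "yes" : "no");

        /* Only when long mode is reported does it make sense to swap the
           64-bit syscall MSRs (LSTAR, STAR, SF_MASK, KERNEL_GS_BASE). */
        return 0;
    }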
     
@@ -6532,5 +6522,13 @@
     if (pVCpu->hm.s.vmx.fRestoreHostFlags)
     {
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+        /** @todo r=ramshankar: This is broken when
+         *        VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined. As
+         *        VMXRestoreHostState() may unconditionally enables interrupts. */
+#error "VMM: Fix Me! Make VMXRestoreHostState() function to skip cli/sti."
+#else
+        Assert(ASMIntAreEnabled());
         VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+#endif
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
     }
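The last hunk adds a deliberate #error for the !VBOX_WITH_VMMR0_DISABLE_PREEMPTION configuration, because VMXRestoreHostState() may re-enable interrupts behind the caller's back, and otherwise asserts that interrupts are already enabled before restoring host state. A small user-mode sketch of that guard pattern follows; IntAreEnabled() is a hypothetical, x86-64 GCC/Clang-only stand-in for ASMIntAreEnabled(), and RestoreHostState() is a stub, not the real assembly routine.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* x86-64 GCC/Clang sketch of an ASMIntAreEnabled()-style check:
       read RFLAGS and test the IF bit (bit 9). Ring-3 code normally runs
       with IF set, so this only models the ring-0 assertion. */
    static bool IntAreEnabled(void)
    {
        uint64_t uFlags;
        __asm__ __volatile__("pushfq; popq %0" : "=r"(uFlags));
        return (uFlags & (1u << 9)) != 0;
    }

    /* Stand-in for VMXRestoreHostState(): the real one may toggle interrupts
       (cli/sti) while reloading host selectors and descriptor tables. */
    static void RestoreHostState(uint32_t fRestoreFlags)
    {
        printf("restoring host state, flags=%#x\n", fRestoreFlags);
    }

    int main(void)
    {
        uint32_t fRestoreHostFlags = 0x3; /* dummy flag bits */
        if (fRestoreHostFlags)
        {
            /* Mirrors the added assertion: only call the restore routine when
               interrupts are already enabled, since it may re-enable them. */
            assert(IntAreEnabled());
            RestoreHostState(fRestoreHostFlags);
            fRestoreHostFlags = 0;
        }
        return 0;
    }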