VirtualBox

Changeset 46504 in vbox


Ignore:
Timestamp:
Jun 11, 2013 5:34:21 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HMSVMR0: AMD-V bits.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r46500 r46504  
    9494PHYSICAL_TABLE and AVIC LOGICAL_TABLE Pointers). */
    9595#define HMSVM_VMCB_CLEAN_AVIC                   RT_BIT(11)
     96/** Mask of all valid VMCB Clean bits. */
     97#define HMSVM_VMCB_CLEAN_ALL                    (  HMSVM_VMCB_CLEAN_INTERCEPTS \
     98                                                 | HMSVM_VMCB_CLEAN_IOPM_MSRPM \
     99                                                 | HMSVM_VMCB_CLEAN_ASID \
     100                                                 | HMSVM_VMCB_CLEAN_TPR \
     101                                                 | HMSVM_VMCB_CLEAN_NP \
     102                                                 | HMSVM_VMCB_CLEAN_CRX \
     103                                                 | HMSVM_VMCB_CLEAN_DRX \
     104                                                 | HMSVM_VMCB_CLEAN_DT \
     105                                                 | HMSVM_VMCB_CLEAN_SEG \
     106                                                 | HMSVM_VMCB_CLEAN_CR2 \
     107                                                 | HMSVM_VMCB_CLEAN_LBR \
     108                                                 | HMSVM_VMCB_CLEAN_AVIC)
    96109/** @} */
     110
     111/** @name SVM-transient.
     112 *
     113 * A state structure for holding miscellaneous information across AMD-V
     114 * VMRUN/#VMEXIT operation, restored after the transition.
     115 *
     116 * @{ */
     117typedef struct SVMTRANSIENT
     118{
     119    /** The host's rflags/eflags. */
     120    RTCCUINTREG     uEFlags;
     121} SVMTRANSIENT, *PSVMTRANSIENT;
     122/** @}  */
     123
    97124
    98125/**
     
    416443    if (uMsr <= 0x00001FFF)
    417444    {
    418         /* Pentium-compatible MSRs */
     445        /* Pentium-compatible MSRs. */
    419446        ulBit    = uMsr * 2;
    420447    }
     
    422449             && uMsr <= 0xC0001FFF)
    423450    {
    424         /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
     451        /* AMD Sixth Generation x86 Processor MSRs. */
    425452        ulBit = (uMsr - 0xC0000000) * 2;
    426453        pbMsrBitmap += 0x800;
     
    429456             && uMsr <= 0xC0011FFF)
    430457    {
    431         /* AMD Seventh and Eighth Generation Processor MSRs */
     458        /* AMD Seventh and Eighth Generation Processor MSRs. */
    432459        ulBit = (uMsr - 0xC0001000) * 2;
    433460        pbMsrBitmap += 0x1000;
     
    17471774 * @retval VINF_* scheduling changes, we have to go back to ring-3.
    17481775 *
    1749  * @param   pVCpu       Pointer to the VMCPU.
    1750  * @param   pCtx        Pointer to the guest-CPU context.
    1751  */
    1752 DECLINE(int) hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
     1776 * @param   pVCpu           Pointer to the VMCPU.
     1777 * @param   pCtx            Pointer to the guest-CPU context.
     1778 * @param   pSvmTransient   Pointer to the SVM transient structure.
     1779 */
     1780DECLINLINE(int) hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    17531781{
    17541782    /* Check force flag actions that might require us to go back to ring-3. */
     
    17591787#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    17601788    /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
    1761     pVmxTransient->uEFlags = ASMIntDisableFlags();
     1789    pSvmTransient->uEFlags = ASMIntDisableFlags();
    17621790    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    17631791    {
    1764         ASMSetFlags(pVmxTransient->uEFlags);
     1792        ASMSetFlags(pSvmTransient->uEFlags);
    17651793        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    17661794        /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
     
    17901818 * @param   pVCpu           Pointer to the VMCPU.
    17911819 * @param   pCtx            Pointer to the guest-CPU context.
     1820 * @param   pSvmTransient   Pointer to the SVM transient structure.
    17921821 *
    17931822 * @remarks Called with preemption disabled.
    17941823 * @remarks No-long-jump zone!!!
    17951824 */
    1796 DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1825DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    17971826{
    17981827    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    18011830#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    18021831    /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
    1803     pVmxTransient->uEFlags = ASMIntDisableFlags();
     1832    pSvmTransient->uEFlags = ASMIntDisableFlags();
    18041833    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    18051834#endif
     
    18181847    AssertRC(rc);
    18191848    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
    1820 
     1849    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
     1850
     1851    /* Flush the appropriate tagged-TLB entries. */
     1852    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
     1853    hmR0SvmFlushTaggedTlb(pVCpu);
     1854    Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
     1855
     1856    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
     1857                                                                    to start executing. */
     1858
     1859    /*
     1860     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
     1861     * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
     1862     *
     1863     * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
     1864     */
     1865    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
     1866    if (    (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
     1867        && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
     1868    {
     1869        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
     1870        uint64_t u64GuestTscAux = 0;
     1871        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
     1872        AssertRC(rc2);
     1873        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
     1874    }
    18211875}
    18221876
     
    18401894     */
    18411895#ifdef VBOX_WITH_KERNEL_USING_XMM
    1842     return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
    1843                              pVCpu->hm.s.svm.pfnVMRun);
     1896    HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
     1897                          pVCpu->hm.s.svm.pfnVMRun);
    18441898#else
    1845     return pVCpu->hm.s.svm.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
     1899    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
    18461900#endif
    18471901}
     1902
     1903
     1904/**
     1905 * Performs some essential restoration of state after running guest code in
     1906 * AMD-V.
     1907 *
     1908 * @param   pVM             Pointer to the VM.
     1909 * @param   pVCpu           Pointer to the VMCPU.
     1910 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     1911 *                          out of sync. Make sure to update the required fields
     1912 *                          before using them.
     1913 * @param   pSvmTransient   Pointer to the SVM transient structure.
     1914 * @param   rcVMRun         Return code of VMRUN.
     1915 *
     1916 * @remarks Called with interrupts disabled.
     1917 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
     1918 *          unconditionally when it is safe to do so.
     1919 */
     1920DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
     1921{
     1922    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1923
     1924    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
     1925    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
     1926
     1927    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1928    pVmcb->u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;             /* Mark the VMCB-state cache as unmodified by VMM. */
     1929
     1930    /* Restore host's TSC_AUX if required. */
     1931    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     1932    {
     1933        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
     1934            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
     1935
     1936        /** @todo Find a way to fix hardcoding a guestimate.  */
     1937        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
     1938                             pVmcb->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
     1939    }
     1940
     1941    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
     1942    Assert(!(ASMGetFlags() & X86_EFL_IF));
     1943    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     1944
     1945    /* -XXX- TPR patching? */
     1946
     1947    ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */
     1948
     1949    /* --XXX- todo */
     1950}
     1951
    18481952
    18491953
     
    18611965    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    18621966
     1967    SVMTRANSIENT SvmTransient;
    18631968    uint32_t cLoops = 0;
    18641969    PSVMVMCB pVmcb  = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     
    18741979        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
    18751980        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    1876         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx);
     1981        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
    18771982        if (rc != VINF_SUCCESS)
    18781983            break;
     
    18851990        VMMRZCallRing3Disable(pVCpu);
    18861991        VMMRZCallRing3RemoveNotification(pVCpu);
    1887         hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx);
     1992        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
    18881993
    18891994        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette