VirtualBox

Changeset 15377 in vbox


Timestamp:
Dec 12, 2008 3:42:40 PM
Author:
vboxsync
Message:

Program entry and exit controls on each guest state sync.
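Every control field touched by this changeset is programmed with the same idiom, described in the source comments as "Set required bits to one and zero according to the MSR capabilities": start from the bits the capability MSR forces to one (disallowed0), OR in the features that are wanted, then AND with the bits the CPU allows to be one (allowed1) before writing the VMCS field. A minimal standalone sketch of that idiom follows; the function name and the capability values are made up for illustration and are not VirtualBox APIs.

    #include <cstdint>
    #include <cstdio>

    /* Illustrative only: combine a desired VMX control value with the
     * disallowed0/allowed1 pair reported by a VMX capability MSR.
     * Bits set in disallowed0 must end up 1; bits clear in allowed1 must end up 0. */
    static uint32_t adjustVmxControls(uint32_t desired, uint32_t disallowed0, uint32_t allowed1)
    {
        uint32_t val = desired;
        val |= disallowed0;   /* force the must-be-one bits on              */
        val &= allowed1;      /* clear everything the CPU does not support  */
        return val;
    }

    int main()
    {
        /* Made-up numbers standing in for e.g. IA32_VMX_PINBASED_CTLS. */
        uint32_t disallowed0 = 0x00000016;   /* bits the CPU requires to be set */
        uint32_t allowed1    = 0x0000007f;   /* bits the CPU permits to be set  */
        uint32_t desired     = 0x00000189;   /* features we would like to use   */

        printf("final control value: %#x\n",
               (unsigned)adjustVmxControls(desired, disallowed0, allowed1));
        return 0;
    }

The code in HWVMXR0.cpp does the same thing inline for each field: val = ...disallowed0; val |= <features>; val &= ...allowed1; then VMXWriteVMCS()/VMXWriteCachedVMCS().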

File:
1 edited

Legend:

  (no prefix)  unmodified
  +            added in r15377
  -            removed in r15377
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp    (r15371)
+++ trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp    (r15377)
@@ -303,6 +303,6 @@
 
         /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
-         * Set required bits to one and zero according to the MSR capabilities.
-         */
+         * Set required bits to one and zero according to the MSR capabilities.
+         */
         val  = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
         /* External and non-maskable interrupts cause VM-exits. */
     
@@ -314,6 +314,6 @@
 
         /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
-         * Set required bits to one and zero according to the MSR capabilities.
-         */
+         * Set required bits to one and zero according to the MSR capabilities.
+         */
         val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
         /* Program which event cause VM-exits and which features we want to use. */
     
@@ -363,6 +363,6 @@
         {
             /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2
-             * Set required bits to one and zero according to the MSR capabilities.
-             */
+             * Set required bits to one and zero according to the MSR capabilities.
+             */
             val  = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
             val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
     
@@ -387,38 +387,21 @@
 
         /* VMX_VMCS_CTRL_CR3_TARGET_COUNT
-         * Set required bits to one and zero according to the MSR capabilities.
-         */
+         * Set required bits to one and zero according to the MSR capabilities.
+         */
         rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR3_TARGET_COUNT, 0);
         AssertRC(rc);
 
-        /* VMX_VMCS_CTRL_EXIT_CONTROLS
-         * Set required bits to one and zero according to the MSR capabilities.
-         */
-        val  = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
-
-        /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
-        val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
-        if (VMX_IS_64BIT_HOST_MODE())
-            val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
-        /* else: Must be zero when AMD64 is not available. */
-#endif
-        val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
-        /* Don't acknowledge external interrupts on VM-exit. */
-        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
-        AssertRC(rc);
-
         /* Forward all exception except #NM & #PF to the guest.
-         * We always need to check pagefaults since our shadow page table can be out of sync.
-         * And we always lazily sync the FPU & XMM state.
-         */
+         * We always need to check pagefaults since our shadow page table can be out of sync.
+         * And we always lazily sync the FPU & XMM state.
+         */
 
         /** @todo Possible optimization:
-         * Keep the FPU and XMM state current in the EM thread. That way there's no need to
-         * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
-         * registers ourselves of course.
-         *
-         * Note: only possible if the current state is actually ours (X86_CR0_TS flag)
-         */
+         * Keep the FPU and XMM state current in the EM thread. That way there's no need to
+         * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
+         * registers ourselves of course.
+         *
+         * Note: only possible if the current state is actually ours (X86_CR0_TS flag)
+         */
 
         /* Don't filter page faults; all of them should cause a switch. */
     
@@ -1101,4 +1084,43 @@
     X86EFLAGS   eflags;
 
+    /* VMX_VMCS_CTRL_ENTRY_CONTROLS
+     * Set required bits to one and zero according to the MSR capabilities.
+     */
+    val  = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
+    /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
+    val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
+
+    /* 64 bits guest mode? */
+    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
+    /* else Must be zero when AMD64 is not available. */
+
+    /* Mask away the bits that the CPU doesn't support */
+    val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
+    rc = VMXWriteCachedVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
+    AssertRC(rc);
+
+    /* VMX_VMCS_CTRL_EXIT_CONTROLS
+     * Set required bits to one and zero according to the MSR capabilities.
+     */
+    val  = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
+
+    /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
+    val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
+    if (VMX_IS_64BIT_HOST_MODE())
+        val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
+    /* else: Must be zero when AMD64 is not available. */
+#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
+    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+        val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;      /* our switcher goes to long mode */
+    else
+        val &= ~VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
+#endif
+    val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
+    /* Don't acknowledge external interrupts on VM-exit. */
+    rc = VMXWriteCachedVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
+    AssertRC(rc);
+
     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     
@@ -1529,21 +1551,4 @@
         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
     }
-
-    /* VMX_VMCS_CTRL_ENTRY_CONTROLS
-     * Set required bits to one and zero according to the MSR capabilities.
-     */
-    val  = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
-    /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
-    val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
-
-    /* 64 bits guest mode? */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
-        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
-    /* else Must be zero when AMD64 is not available. */
-
-    /* Mask away the bits that the CPU doesn't support */
-    val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
-    rc = VMXWriteCachedVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
-    AssertRC(rc);
 
     /* 64 bits guest mode? */
     
@@ -3513,5 +3518,4 @@
     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
 
-
 #ifdef DEBUG
     pCache->TestIn.pPageCpuPhys = 0;
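Net effect of the hunks above: the one-time programming of VMX_VMCS_CTRL_EXIT_CONTROLS in the VM setup path is deleted, the duplicated VMX_VMCS_CTRL_ENTRY_CONTROLS block later in the file is removed, and both fields are now recomputed in the guest-state loading path so they can follow the guest's current mode (EFER.LMA) and the host mode on every sync. The compilable sketch below models that split; all type, function and constant names are hypothetical stand-ins, not the real VMMR0 interfaces.

    #include <cstdint>
    #include <cstdio>

    /* Hypothetical, simplified model of the split introduced by r15377; none
     * of these names are the real HWVMXR0.cpp interfaces, they only mirror
     * the shape of the code in the diff above. */

    struct VmxCaps               /* stands in for pVM->hwaccm.s.vmx.msr.*    */
    {
        uint32_t disallowed0;    /* bits the capability MSR forces to one    */
        uint32_t allowed1;       /* bits the capability MSR allows to be one */
    };

    struct GuestCtx              /* stands in for the guest CPU context      */
    {
        bool fLongModeActive;    /* EFER.LMA                                 */
    };

    /* Pretend VMCS write: just log what would be programmed. */
    static void writeVmcs(const char *pszField, uint32_t val)
    {
        printf("%-16s <- %#x\n", pszField, (unsigned)val);
    }

    static uint32_t adjust(uint32_t desired, const VmxCaps &caps)
    {
        return (desired | caps.disallowed0) & caps.allowed1;
    }

    /* Illustrative control bits (values made up). */
    enum
    {
        ENTRY_LOAD_DEBUG = 0x0004,
        ENTRY_IA32E_MODE = 0x0200,
        EXIT_SAVE_DEBUG  = 0x0004,
        EXIT_HOST_AMD64  = 0x0200
    };

    /* Before this changeset the entry/exit controls were written once at VM
     * setup; afterwards they are recomputed here, on every guest-state sync,
     * so the IA-32e entry control can track the guest's current EFER.LMA. */
    static void loadGuestState(const VmxCaps &entryCaps, const VmxCaps &exitCaps,
                               const GuestCtx &ctx, bool fHost64Bit)
    {
        uint32_t entry = ENTRY_LOAD_DEBUG;          /* load dr7 & IA32_DEBUGCTL */
        if (ctx.fLongModeActive)
            entry |= ENTRY_IA32E_MODE;              /* 64-bit guest entry       */
        writeVmcs("ENTRY_CONTROLS", adjust(entry, entryCaps));

        uint32_t exit = EXIT_SAVE_DEBUG;            /* save dr7 & IA32_DEBUGCTL */
        if (fHost64Bit)
            exit |= EXIT_HOST_AMD64;                /* return to a 64-bit host  */
        writeVmcs("EXIT_CONTROLS", adjust(exit, exitCaps));
    }

    int main()
    {
        VmxCaps entryCaps = { 0x000011ff, 0x0000ffff };  /* made-up capabilities  */
        VmxCaps exitCaps  = { 0x00036dff, 0x000fffff };
        GuestCtx ctx      = { true };                    /* guest in long mode    */

        loadGuestState(entryCaps, exitCaps, ctx, /*fHost64Bit=*/true);
        return 0;
    }

The real code additionally covers the 32-bit host with 64-bit guests case (the HC_ARCH_BITS == 32 && VBOX_ENABLE_64_BITS_GUESTS branch in the diff), where the host-AMD64 exit control is toggled per sync depending on whether the switcher goes to long mode.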