VirtualBox

Changeset 45947 in vbox for trunk/src/VBox/VMM


Timestamp: May 8, 2013 12:27:58 PM
Author: vboxsync
Message:

VMM: HM cleanup: drop the redundant CONTROLS token from the VMX CTRL symbolic names.
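
The rename is purely mechanical: every control-field constant and VMCS field index loses the CONTROLS word, while the bit values and VMCS encodings stay the same. A minimal before/after sketch, adapted from the hunks below (a naming illustration only, not standalone compilable code):

    /* Before r45947 (old spellings): */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);

    /* After r45947 (new spellings, same bits, same VMCS field): */
    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;
    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);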

Location: trunk/src/VBox/VMM
Files: 5 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r45863 r45947  
    528528                 * Timer Does Not Count Down at the Rate Specified" erratum.
    529529                 */
    530                 if (g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
     530                if (g_HvmR0.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER)
    531531                {
    532532                    g_HvmR0.vmx.fUsePreemptTimer   = true;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45946 r45947  
    761761#endif
    762762
    763         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     763        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    764764            hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    765765
     
    852852
    853853        /* Allocate the Virtual-APIC page for transparent TPR accesses. */
    854         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     854        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    855855        {
    856856            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
     
    861861
    862862        /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
    863         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     863        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    864864        {
    865865            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
     
    16041604    uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;       /* Bits cleared here must always be cleared. */
    16051605
    1606     val |=   VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT           /* External interrupts causes a VM-exits. */
    1607            | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;              /* Non-maskable interrupts causes a VM-exit. */
    1608     Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));
     1606    val |=   VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts causes a VM-exits. */
     1607           | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;              /* Non-maskable interrupts causes a VM-exit. */
     1608    Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
    16091609
    16101610    /* Enable the VMX preemption timer. */
    16111611    if (pVM->hm.s.vmx.fUsePreemptTimer)
    16121612    {
    1613         Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
    1614         val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
     1613        Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
     1614        val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
    16151615    }
    16161616
     
    16221622    }
    16231623
    1624     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
     1624    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
    16251625    AssertRCReturn(rc, rc);
    16261626
     
    16471647    uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;          /* Bits cleared here must be cleared in the VMCS. */
    16481648
    1649     val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT                  /* HLT causes a VM-exit. */
    1650            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING        /* Use TSC-offsetting. */
    1651            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT               /* MOV DRx causes a VM-exit. */
    1652            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT            /* All IO instructions cause a VM-exit. */
    1653            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT                /* RDPMC causes a VM-exit. */
    1654            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT              /* MONITOR causes a VM-exit. */
    1655            | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;               /* MWAIT causes a VM-exit. */
    1656 
    1657     /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
    1658     if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
    1659         ||  (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
    1660     {
    1661         LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!"));
     1649    val |=   VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                  /* HLT causes a VM-exit. */
     1650           | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING        /* Use TSC-offsetting. */
     1651           | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT               /* MOV DRx causes a VM-exit. */
     1652           | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT            /* All IO instructions cause a VM-exit. */
     1653           | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT                /* RDPMC causes a VM-exit. */
     1654           | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT              /* MONITOR causes a VM-exit. */
     1655           | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;               /* MWAIT causes a VM-exit. */
     1656
     1657    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
     1658    if (   !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
     1659        ||  (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
     1660    {
     1661        LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
    16621662        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    16631663    }
     
    16671667    {
    16681668        Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                      /* Paranoia. */
    1669         val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
    1670                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    1671                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     1669        val |=   VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
     1670               | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     1671               | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    16721672    }
    16731673
    16741674    /* Use TPR shadowing if supported by the CPU. */
    1675     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     1675    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    16761676    {
    16771677        Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
     
    16811681        AssertRCReturn(rc, rc);
    16821682
    1683         val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;         /* CR8 reads from the Virtual-APIC page. */
     1683        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;         /* CR8 reads from the Virtual-APIC page. */
    16841684                                                                        /* CR8 writes causes a VM-exit based on TPR threshold. */
    1685         Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
    1686         Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
     1685        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
     1686        Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
    16871687    }
    16881688    else
    16891689    {
    1690         val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT        /* CR8 reads causes a VM-exit. */
    1691                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;        /* CR8 writes causes a VM-exit. */
     1690        val |=   VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT        /* CR8 reads causes a VM-exit. */
     1691               | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;        /* CR8 writes causes a VM-exit. */
    16921692    }
    16931693
    16941694    /* Use MSR-bitmaps if supported by the CPU. */
    1695     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    1696     {
    1697         val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
     1695    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1696    {
     1697        val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
    16981698
    16991699        Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
     
    17281728    }
    17291729
    1730     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
     1730    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
    17311731    AssertRCReturn(rc, rc);
    17321732
     
    17511751            /*
    17521752             * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
    1753              * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
     1753             * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
    17541754             * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
    17551755             */
     
    17791779        {
    17801780            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                     /* Enable RDTSCP support. */
    1781             if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     1781            if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    17821782                hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    17831783        }
     
    17901790        }
    17911791
    1792         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
     1792        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
    17931793        AssertRCReturn(rc, rc);
    17941794
     
    21382138    /* Assertion is right but we would not have updated u32ExitCtls yet. */
    21392139#if 0
    2140     if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))
     2140    if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
    21412141        Assert(uSelSS != 0);
    21422142#endif
     
    23622362
    23632363        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
    2364         val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
     2364        val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
    23652365
    23662366        /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
    23672367        if (CPUMIsGuestInLongModeEx(pMixedCtx))
    2368             val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
     2368            val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
    23692369        else
    2370             Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));
     2370            Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
    23712371
    23722372        /*
    23732373         * The following should not be set (since we're not in SMM mode):
    2374          * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
    2375          * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON
     2374         * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
     2375         * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
    23762376         */
    23772377
    2378         /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
    2379          *        VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
    2380          *  VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */
     2378        /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
     2379         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
     2380         *  VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
    23812381
    23822382        if ((val & zap) != val)
     
    23872387        }
    23882388
    2389         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
     2389        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
    23902390        AssertRCReturn(rc, rc);
    23912391
     
    24202420
    24212421        /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
    2422         val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
     2422        val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
    24232423
    24242424        /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
    24252425#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    24262426        if (HMVMX_IS_64BIT_HOST_MODE())
    2427             val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
     2427            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
    24282428        else
    2429             Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
     2429            Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
    24302430#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    24312431        if (CPUMIsGuestInLongModeEx(pMixedCtx))
    2432             val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;    /* The switcher goes to long mode. */
     2432            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;    /* The switcher goes to long mode. */
    24332433        else
    2434             Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
     2434            Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
    24352435#endif
    24362436
    24372437        /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
    2438         Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));
    2439 
    2440         /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
    2441          *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
    2442          *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
    2443          *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
    2444          *        VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
    2445 
    2446         if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
    2447             val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
     2438        Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
     2439
     2440        /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
     2441         *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
     2442         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
     2443         *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
     2444         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
     2445
     2446        if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
     2447            val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
    24482448
    24492449        if ((val & zap) != val)
     
    24542454        }
    24552455
    2456         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
     2456        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
    24572457        AssertRCReturn(rc, rc);
    24582458
     
    24812481    {
    24822482        /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
    2483         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     2483        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    24842484        {
    24852485            Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
     
    25092509                Assert(!CPUMIsGuestInLongModeEx(pMixedCtx));     /* EFER always up-to-date. */
    25102510                pMixedCtx->msrLSTAR = u8GuestTpr;
    2511                 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     2511                if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    25122512                {
    25132513                    /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
     
    27422742            {
    27432743                /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
    2744                 pVCpu->hm.s.vmx.u32ProcCtls &= ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    2745                                                  | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
     2744                pVCpu->hm.s.vmx.u32ProcCtls &= ~(  VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2745                                                 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
    27462746            }
    27472747            else
    27482748            {
    27492749                /* The guest doesn't have paging enabled, make CR3 access to cause VM exits to update our shadow. */
    2750                 pVCpu->hm.s.vmx.u32ProcCtls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    2751                                                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     2750                pVCpu->hm.s.vmx.u32ProcCtls |=   VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2751                                               | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    27522752            }
    27532753
    27542754            /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
    27552755            if (pVM->hm.s.vmx.fUnrestrictedGuest)
    2756                 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT;
    2757 
    2758             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     2756                pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT;
     2757
     2758            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    27592759            AssertRCReturn(rc, rc);
    27602760        }
     
    30823082#ifdef VBOX_STRICT
    30833083    /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
    3084     if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
     3084    if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
    30853085    {
    30863086        Assert(!(pMixedCtx->dr[7] >> 32));                        /* upper 32 bits are reserved (MBZ). */
     
    30983098    {
    30993099        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
    3100         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
    3101         {
    3102             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    3103             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     3100        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
     3101        {
     3102            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     3103            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    31043104            AssertRCReturn(rc, rc);
    31053105            Assert(fInterceptDB == false);
     
    31483148    /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
    31493149    if (fInterceptMovDRx)
    3150         pVCpu->hm.s.vmx.u32ProcCtls   |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     3150        pVCpu->hm.s.vmx.u32ProcCtls   |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    31513151    else
    3152         pVCpu->hm.s.vmx.u32ProcCtls   &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     3152        pVCpu->hm.s.vmx.u32ProcCtls   &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    31533153
    31543154    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP,   pVCpu->hm.s.vmx.u32XcptBitmap);
    31553155    AssertRCReturn(rc, rc);
    3156     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     3156    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    31573157    AssertRCReturn(rc, rc);
    31583158
     
    36383638        {
    36393639            /** @todo support save IA32_EFER, i.e.
    3640              *        VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
     3640             *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, in which case the
    36413641             *        guest EFER need not be part of the VM-entry MSR-load area. */
    36423642            pGuestMsr->u32IndexMSR = MSR_K6_EFER;
     
    38573857                uint64_t        u64Val;
    38583858                HMVMXHCUINTREG  uHCReg;
    3859                 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val);         AssertRC(rc);
    3860                 Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS       %#RX32\n", u32Val));
    3861                 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val);        AssertRC(rc);
    3862                 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS      %#RX32\n", u32Val));
    3863                 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val);       AssertRC(rc);
    3864                 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2     %#RX32\n", u32Val));
    3865                 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val);            AssertRC(rc);
    3866                 Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS          %#RX32\n", u32Val));
    3867                 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val);             AssertRC(rc);
    3868                 Log(("VMX_VMCS32_CTRL_EXIT_CONTROLS           %#RX32\n", u32Val));
     3859                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);                  AssertRC(rc);
     3860                Log(("VMX_VMCS32_CTRL_PIN_EXEC                %#RX32\n", u32Val));
     3861                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);                 AssertRC(rc);
     3862                Log(("VMX_VMCS32_CTRL_PROC_EXEC               %#RX32\n", u32Val));
     3863                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);       AssertRC(rc);
     3864                Log(("VMX_VMCS32_CTRL_PROC_EXEC2              %#RX32\n", u32Val));
     3865                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);            AssertRC(rc);
     3866                Log(("VMX_VMCS32_CTRL_ENTRY                   %#RX32\n", u32Val));
     3867                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);             AssertRC(rc);
     3868                Log(("VMX_VMCS32_CTRL_EXIT                    %#RX32\n", u32Val));
    38693869                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val);          AssertRC(rc);
    38703870                Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT        %#RX32\n", u32Val));
     
    45114511
    45124512        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    4513         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);           AssertRC(rc);
     4513        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);          AssertRC(rc);
    45144514    }
    45154515    else
     
    45214521        if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    45224522        {
    4523             /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    4524             rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);    AssertRC(rc);
    4525 
    4526             pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    4527             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);  AssertRC(rc);
     4523            /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
     4524            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);   AssertRC(rc);
     4525
     4526            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     4527            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);          AssertRC(rc);
    45284528            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    45294529        }
     
    45314531        {
    45324532            /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
    4533             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    4534             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);  AssertRC(rc);
     4533            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     4534            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);          AssertRC(rc);
    45354535            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
    45364536        }
     
    45394539    {
    45404540        /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
    4541         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    4542         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);      AssertRC(rc);
     4541        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     4542        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);               AssertRC(rc);
    45434543        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
    45444544    }
     
    51705170     * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
    51715171     *
    5172      * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again.
    5173      * We cover for it here.
     5172     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
    51745173     */
    51755174    if (VMMRZCallRing3IsEnabled(pVCpu))
     
    55255524            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    55265525        }
     5526
     5527        /* Pending HM PAE PDPEs. */
    55275528        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    55285529        {
     
    57575758        CPUMR0LoadHostDebugState(pVM, pVCpu);
    57585759        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    5759         Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     5760        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    57605761    }
    57615762
     
    58755876DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
    58765877{
    5877     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
    5878     {
    5879         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
    5880         {
    5881             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
    5882             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     5878    if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     5879    {
     5880        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
     5881        {
     5882            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
     5883            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    58835884            AssertRC(rc);
    58845885        }
     
    66766677
    66776678    /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
    6678     if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     6679    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    66796680        pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
    66806681
     
    67146715     */
    67156716    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    6716         && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     6717        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    67176718    {
    67186719        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
     
    67576758    pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */
    67586759
    6759     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     6760    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    67606761    {
    67616762#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
    68176818         * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
    68186819         */
    6819         if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     6820        if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    68206821            && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
    68216822        {
     
    70817082
    70827083    uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
    7083     Assert(   !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
     7084    Assert(   !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
    70847085           && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
    70857086
     
    71827183
    71837184    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
    7184     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT);
    7185     pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
    7186     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     7185    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
     7186    pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
     7187    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    71877188    AssertRCReturn(rc, rc);
    71887189
     
    72827283        Assert(pVmxTransient->cbInstr == 2);
    72837284        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
    7284         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
     7285        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
    72857286            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
    72867287    }
     
    73127313        Assert(pVmxTransient->cbInstr == 3);
    73137314        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
    7314         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
     7315        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
    73157316            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
    73167317    }
     
    75327533{
    75337534    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7534     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
     7535    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
    75357536    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    75367537    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     
    77977798
    77987799        /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
    7799         if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)))
     7800        if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)))
    78007801        {
    78017802            switch (pMixedCtx->ecx)
     
    78497850{
    78507851    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7851     /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT. */
     7852    /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
    78527853    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
    7853     if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
     7854    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
    78547855        return VERR_EM_INTERPRETER;
    78557856    AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     
    78657866{
    78667867    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7867     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
     7868    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
    78687869
    78697870    /*
     
    79367937                    break;
    79377938                case 8: /* CR8 */
    7938                     Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     7939                    Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    79397940                    /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
    79407941                    /* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
     
    79607961            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
    79617962            Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
    7962                    || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     7963                   || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    79637964
    79647965            rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
     
    82468247{
    82478248    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    8248     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
    8249     pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    8250     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     8249    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
     8250    pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     8251    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    82518252    AssertRCReturn(rc, rc);
    82528253    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
     
    82898290        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
    82908291        {
    8291             if (  (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     8292            if (  (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    82928293                && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
    82938294            {
     
    83498350    {
    83508351        /* Don't intercept MOV DRx. */
    8351         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    8352         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     8352        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     8353        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    83538354        AssertRCReturn(rc, rc);
    83548355
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r45894 r45947  
    318318    pVM->hm.s.vmx.hMemObjApicAccess = NIL_RTR0MEMOBJ;
    319319
    320     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     320    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    321321    {
    322322        /* Allocate one page for the APIC physical page (serves for filtering accesses). */
     
    384384
    385385        /* Allocate the MSR bitmap if this feature is supported. */
    386         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     386        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    387387        {
    388388            rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, PAGE_SIZE, false /* fExecutable */);
     
    606606
    607607        /*
    608          * VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
     608         * VMX_VMCS_CTRL_PIN_EXEC
    609609         * Set required bits to one and zero according to the MSR capabilities.
    610610         */
    611611        val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    612         val |=    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT      /* External interrupts */
    613                 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;         /* Non-maskable interrupts */
     612        val |=    VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT      /* External interrupts */
     613                | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;         /* Non-maskable interrupts */
    614614
    615615        /*
     
    617617         */
    618618        if (pVM->hm.s.vmx.fUsePreemptTimer)
    619             val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
     619            val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
    620620        val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    621621
    622         rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
     622        rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PIN_EXEC, val);
    623623        AssertRC(rc);
    624624        pVCpu->hm.s.vmx.u32PinCtls = val;
    625625
    626626        /*
    627          * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
     627         * VMX_VMCS_CTRL_PROC_EXEC
    628628         * Set required bits to one and zero according to the MSR capabilities.
    629629         */
    630630        val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
    631631        /* Program which event cause VM-exits and which features we want to use. */
    632         val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
    633                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING
    634                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
    635                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
    636                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT
    637                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT
    638                | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;     /* don't execute mwait or else we'll idle inside
     632        val |=   VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT
     633               | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING
     634               | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT
     635               | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT
     636               | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT
     637               | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT
     638               | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;     /* don't execute mwait or else we'll idle inside
    639639                                                                      the guest (host thinks the cpu load is high) */
    640640
     
    642642        if (!pVM->hm.s.fNestedPaging)
    643643        {
    644             val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
    645                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    646                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     644            val |=   VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
     645                   | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     646                   | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    647647        }
    648648
    649649        /*
    650          * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch
     650         * VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT might cause a vmlaunch
    651651         * failure with an invalid control fields error. (combined with some other exit reasons)
    652652         */
    653         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     653        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    654654        {
    655655            /* CR8 reads from the APIC shadow page; writes cause an exit is they lower the TPR below the threshold */
    656             val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
     656            val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;
    657657            Assert(pVM->hm.s.vmx.pbApicAccess);
    658658        }
    659659        else
    660660            /* Exit on CR8 reads & writes in case the TPR shadow feature isn't present. */
    661             val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
    662 
    663         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     661            val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;
     662
     663        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    664664        {
    665665            Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    666             val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
     666            val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
    667667        }
    668668
     
    675675        pVCpu->hm.s.vmx.u32ProcCtls = val;
    676676
    677         rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
     677        rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, val);
    678678        AssertRC(rc);
    679679
     
    681681        {
    682682            /*
    683              * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2
     683             * VMX_VMCS_CTRL_PROC_EXEC2
    684684             * Set required bits to one and zero according to the MSR capabilities.
    685685             */
     
    706706            val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
    707707            pVCpu->hm.s.vmx.u32ProcCtls2 = val;
    708             rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
     708            rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC2, val);
    709709            AssertRC(rc);
    710710        }
     
    749749         * Set the MSR bitmap address.
    750750         */
    751         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     751        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    752752        {
    753753            Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
     
    794794        AssertRC(rc);
    795795
    796         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     796        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    797797        {
    798798            Assert(pVM->hm.s.vmx.hMemObjApicAccess);
     
    11331133            if (!(pCtx->eflags.u32 & X86_EFL_IF))
    11341134            {
    1135                 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
     1135                if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
    11361136                {
    11371137                    LogFlow(("Enable irq window exit!\n"));
    1138                     pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
    1139                     rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     1138                    pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
     1139                    rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    11401140                    AssertRC(rc);
    11411141                }
     
    17811781        if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))
    17821782        {
    1783             /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
     1783            /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    17841784            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
    17851785            AssertRC(rc);
    17861786
    1787             pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    1788             rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     1787            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     1788            rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    17891789            AssertRC(rc);
    17901790            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
     
    17971797                     TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset,
    17981798                     TMCpuTickGet(pVCpu)));
    1799             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    1800             rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     1799            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     1800            rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    18011801            AssertRC(rc);
    18021802            STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
     
    18051805    else
    18061806    {
    1807         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    1808         rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     1807        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
     1808        rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    18091809        AssertRC(rc);
    18101810        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
     
    18311831
    18321832    /*
    1833      * VMX_VMCS_CTRL_ENTRY_CONTROLS
     1833     * VMX_VMCS_CTRL_ENTRY
    18341834     * Set required bits to one and zero according to the MSR capabilities.
    18351835     */
     
    18401840     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs
    18411841     */
    1842     val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
     1842    val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
    18431843
    18441844    if (CPUMIsGuestInLongModeEx(pCtx))
    1845         val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
     1845        val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
    18461846    /* else Must be zero when AMD64 is not available. */
    18471847
     
    18501850     */
    18511851    val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    1852     rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
     1852    rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY, val);
    18531853    AssertRC(rc);
    18541854
    18551855    /*
    1856      * VMX_VMCS_CTRL_EXIT_CONTROLS
     1856     * VMX_VMCS_CTRL_EXIT
    18571857     * Set required bits to one and zero according to the MSR capabilities.
    18581858     */
     
    18631863     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs
    18641864     */
    1865     val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
     1865    val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
    18661866
    18671867#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    18681868    if (VMX_IS_64BIT_HOST_MODE())
    1869         val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
     1869        val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
    18701870    /* else Must be zero when AMD64 is not available. */
    18711871#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    18721872    if (CPUMIsGuestInLongModeEx(pCtx))
    1873         val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;      /* our switcher goes to long mode */
     1873        val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;      /* our switcher goes to long mode */
    18741874    else
    1875         Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
     1875        Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
    18761876#endif
    18771877    val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
     
    18801880     * Don't acknowledge external interrupts on VM-exit.
    18811881     */
    1882     rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
     1882    rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT, val);
    18831883    AssertRC(rc);
    18841884
     
    20792079            {
    20802080                /* Disable CR3 read/write monitoring as we don't need it for EPT. */
    2081                 pVCpu->hm.s.vmx.u32ProcCtls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    2082                                                     | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
     2081                pVCpu->hm.s.vmx.u32ProcCtls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2082                                                    | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
    20832083            }
    20842084            else
    20852085            {
    20862086                /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */
    2087                 pVCpu->hm.s.vmx.u32ProcCtls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    2088                                                  | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     2087                pVCpu->hm.s.vmx.u32ProcCtls |=   VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2088                                                 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    20892089            }
    2090             rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     2090            rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    20912091            AssertRC(rc);
    20922092        }
     
    22012201#if 0
    22022202    /* Enable single stepping if requested and CPU supports it. */
    2203     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
     2203    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
    22042204        if (DBGFIsStepping(pVCpu))
    22052205        {
    2206             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    2207             rc = VMXWriteVmcs(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     2206            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     2207            rc = VMXWriteVmcs(VMX_VMCS_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    22082208            AssertRC(rc);
    22092209        }
     
    23012301
    23022302            /* Disable DRx move intercepts. */
    2303             pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    2304             rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     2303            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     2304            rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    23052305            AssertRC(rc);
    23062306
     
    29512951        RTCCUINTREG val2;
    29522952
    2953         rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &val2);
     2953        rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC, &val2);
    29542954        AssertRC(rc2);
    2955         Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n",  val2));
     2955        Log2(("VMX_VMCS_CTRL_PIN_EXEC = %08x\n",  val2));
    29562956
    29572957        /* allowed zero */
    29582958        if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
    2959             Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
     2959            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC: zero\n"));
    29602960
    29612961        /* allowed one */
    29622962        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
    2963             Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
    2964 
    2965         rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &val2);
     2963            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC: one\n"));
     2964
     2965        rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC, &val2);
    29662966        AssertRC(rc2);
    2967         Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n",  val2));
     2967        Log2(("VMX_VMCS_CTRL_PROC_EXEC = %08x\n",  val2));
    29682968
    29692969        /*
     
    29722972        if (pVM->hm.s.fNestedPaging)
    29732973        {
    2974             val2 |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
    2975                     | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    2976                     | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
     2974            val2 |=   VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
     2975                    | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2976                    | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    29772977        }
    29782978
    29792979        /* allowed zero */
    29802980        if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
    2981             Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
     2981            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC: zero\n"));
    29822982
    29832983        /* allowed one */
    29842984        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
    2985             Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
    2986 
    2987         rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &val2);
     2985            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC: one\n"));
     2986
     2987        rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY, &val2);
    29882988        AssertRC(rc2);
    2989         Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n",  val2));
     2989        Log2(("VMX_VMCS_CTRL_ENTRY = %08x\n",  val2));
    29902990
    29912991        /* allowed zero */
    29922992        if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0)
    2993             Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
     2993            Log(("Invalid VMX_VMCS_CTRL_ENTRY: zero\n"));
    29942994
    29952995        /* allowed one */
    29962996        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
    2997             Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
    2998 
    2999         rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS, &val2);
     2997            Log(("Invalid VMX_VMCS_CTRL_ENTRY: one\n"));
     2998
     2999        rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_EXIT, &val2);
    30003000        AssertRC(rc2);
    3001         Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n",  val2));
     3001        Log2(("VMX_VMCS_CTRL_EXIT = %08x\n",  val2));
    30023002
    30033003        /* allowed zero */
    30043004        if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0)
    3005             Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
     3005            Log(("Invalid VMX_VMCS_CTRL_EXIT: zero\n"));
    30063006
    30073007        /* allowed one */
    30083008        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
    3009             Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
     3009            Log(("Invalid VMX_VMCS_CTRL_EXIT: one\n"));
    30103010    }
    30113011    fWasInLongMode = CPUMIsGuestInLongModeEx(pCtx);
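The "allowed zero"/"allowed one" checks above validate each control word against its VMX capability MSR: every bit in disallowed0 must be set, and no bit outside allowed1 may be set. A minimal sketch of that validation, assuming the usual convention where the capability MSR's low dword is the must-be-one mask and its high dword the may-be-one mask:

    #include <stdint.h>
    #include <stdbool.h>

    /* Validate a 32-bit VMCS control word against its VMX capability MSR:
       disallowed0 = bits the CPU forces to 1 ("allowed zero" check),
       allowed1    = bits the CPU permits to be 1 ("allowed one" check). */
    bool IsVmxCtlValid(uint32_t uCtl, uint64_t uCapMsr)
    {
        uint32_t fDisallowed0 = (uint32_t)uCapMsr;          /* low dword  */
        uint32_t fAllowed1    = (uint32_t)(uCapMsr >> 32);  /* high dword */

        if ((uCtl & fDisallowed0) != fDisallowed0)
            return false;                 /* a must-be-one bit is clear */
        if (uCtl & ~fAllowed1)
            return false;                 /* a bit outside the allowed-one mask is set */
        return true;
    }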
     
    33473347     */
    33483348    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    3349         && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3349        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    33503350    {
    33513351        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
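When RDTSCP is enabled for the guest and RDTSC exiting is off, the guest reads IA32_TSC_AUX directly, so the hunk above saves the host value of MSR_K8_TSC_AUX before entry; the surrounding code then exposes the guest value and puts the host value back after the VM-exit. A hedged sketch of that save/load/restore pattern, with the ring-0 MSR accessors simulated so the example stays self-contained:

    #include <stdint.h>

    #define MSR_IA32_TSC_AUX  0xC0000103u    /* a.k.a. MSR_K8_TSC_AUX in the code above */

    /* Hypothetical stand-ins for the ring-0 RDMSR/WRMSR wrappers, simulated with a
       plain variable so the sketch compiles on its own. */
    static uint64_t g_uSimTscAux;
    static uint64_t ReadMsr(uint32_t idMsr)               { (void)idMsr; return g_uSimTscAux; }
    static void     WriteMsr(uint32_t idMsr, uint64_t u)  { (void)idMsr; g_uSimTscAux = u;    }

    void RunGuestWithRdtscp(uint64_t uGuestTscAux)
    {
        uint64_t uHostTscAux = ReadMsr(MSR_IA32_TSC_AUX);  /* save the host value    */
        WriteMsr(MSR_IA32_TSC_AUX, uGuestTscAux);          /* expose the guest value */

        /* ... VMLAUNCH/VMRESUME; the guest may execute RDTSCP without exiting ... */

        WriteMsr(MSR_IA32_TSC_AUX, uHostTscAux);           /* restore the host value */
    }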
     
    33663366
    33673367    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    3368     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3368    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    33693369    {
    33703370#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
    42584258        LogFlow(("VMX_EXIT_INT_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip,
    42594259                 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
    4260         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
    4261         rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     4260        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
     4261        rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    42624262        AssertRC(rc2);
    42634263        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
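The handler above disarms interrupt-window exiting once the exit has fired, i.e. once the guest is able to accept the pending interrupt; the matching arming step (setting the bit while an interrupt is pending but blocked) lives elsewhere in this file. A small sketch of the toggle, using the standard bit position; the caller is expected to write the result back to VMX_VMCS32_CTRL_PROC_EXEC:

    #include <stdint.h>
    #include <stdbool.h>

    #define PROC_EXEC_INT_WINDOW_EXIT  (1u << 2)   /* interrupt-window exiting, bit 2 */

    /* Arm the exit while an interrupt is pending but cannot be delivered; disarm it
       otherwise (for example right after the VMX_EXIT_INT_WINDOW handler above). */
    uint32_t UpdateIntWindowExiting(uint32_t uProcCtls, bool fIntPending, bool fGuestInterruptible)
    {
        if (fIntPending && !fGuestInterruptible)
            uProcCtls |= PROC_EXEC_INT_WINDOW_EXIT;
        else
            uProcCtls &= ~PROC_EXEC_INT_WINDOW_EXIT;
        return uProcCtls;    /* caller flushes this to the VMCS */
    }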
     
    44424442                        /* CR8 contains the APIC TPR */
    44434443                        Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1
    4444                                  & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4444                                 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    44454445                        break;
    44464446
     
    44634463                /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
    44644464                Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
    4465                        || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4465                       || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    44664466
    44674467                rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     
    45114511        {
    45124512            /* Disable DRx move intercepts. */
    4513             pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    4514             rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     4513            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     4514            rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    45154515            AssertRC(rc2);
    45164516
     
    45404540        }
    45414541
    4542         /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first
     4542        /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT after the first
    45434543         *        time and restore DRx registers afterwards */
    45444544        if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
     
    48804880    case VMX_EXIT_MTF:                  /* 37 Exit due to Monitor Trap Flag. */
    48814881        LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip));
    4882         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    4883         rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     4882        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
     4883        rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    48844884        AssertRC(rc2);
    48854885        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
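The monitor trap flag is a similar one-shot: it is armed to force a VM-exit after a single guest instruction, and the VMX_EXIT_MTF handler above disarms it again. A sketch of the exit-side half, using the SDM bit position and exit-reason number:

    #include <stdint.h>

    #define PROC_EXEC_MONITOR_TRAP_FLAG  (1u << 27)   /* monitor trap flag, bit 27 */
    #define VMX_EXIT_REASON_MTF          37u          /* exit reason handled above */

    /* Disarm the monitor trap flag once its one-shot exit has been delivered. */
    uint32_t HandleMtfExit(uint32_t uProcCtls, uint32_t uExitReason)
    {
        if (uExitReason == VMX_EXIT_REASON_MTF)
            uProcCtls &= ~PROC_EXEC_MONITOR_TRAP_FLAG;
        return uProcCtls;    /* caller writes this back to the VMCS, as the handler above does */
    }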
     
    51035103    {
    51045104        CPUMR0LoadHostDebugState(pVM, pVCpu);
    5105         Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     5105        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    51065106    }
    51075107    else
     
    51165116
    51175117        /* Enable DRx move intercepts again. */
    5118         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    5119         int rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     5118        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     5119        int rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    51205120        AssertRC(rc);
    51215121
     
    51245124    }
    51255125    else
    5126         Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     5126        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    51275127
    51285128    /*
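Together, these hunks implement lazy debug-register switching: MOV DRx is intercepted until the guest actually touches a debug register, the guest debug state is then loaded and the intercept dropped, and when the host debug state is reloaded (CPUMR0LoadHostDebugState above) the intercept is re-armed. A simplified, self-contained sketch of that state machine, with hypothetical helpers in place of the CPUM calls:

    #include <stdint.h>
    #include <stdbool.h>

    #define PROC_EXEC_MOV_DR_EXIT  (1u << 23)   /* MOV-DR exiting, bit 23 */

    typedef struct
    {
        uint32_t u32ProcCtls;       /* cached VMX_VMCS32_CTRL_PROC_EXEC        */
        bool     fUsingGuestDRx;    /* guest debug registers currently loaded  */
    } VCPUDBGSTATE;

    /* Hypothetical helpers; the real code uses CPUMR0LoadHostDebugState and its
       guest-state counterpart. */
    static void LoadGuestDRx(VCPUDBGSTATE *p) { p->fUsingGuestDRx = true;  }
    static void LoadHostDRx(VCPUDBGSTATE *p)  { p->fUsingGuestDRx = false; }

    /* First guest MOV DRx: load the guest debug state and stop intercepting. */
    void OnMovDRxExit(VCPUDBGSTATE *p)
    {
        LoadGuestDRx(p);
        p->u32ProcCtls &= ~PROC_EXEC_MOV_DR_EXIT;   /* flush to the VMCS afterwards */
    }

    /* Leaving guest context: restore the host debug state and re-arm the intercept. */
    void OnLeaveGuest(VCPUDBGSTATE *p)
    {
        if (p->fUsingGuestDRx)
        {
            LoadHostDRx(p);
            p->u32ProcCtls |= PROC_EXEC_MOV_DR_EXIT;   /* flush to the VMCS afterwards */
        }
    }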
     
    53135313                VMXReadVmcs(VMX_VMCS_GUEST_RIP, &val);
    53145314                Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
    5315                 VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS,   &val);
    5316                 Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS   %08x\n", val));
    5317                 VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS,   &val);
    5318                 Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS  %08x\n", val));
    5319                 VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS,       &val);
    5320                 Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS      %08x\n", val));
    5321                 VMXReadVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS,        &val);
    5322                 Log(("VMX_VMCS_CTRL_EXIT_CONTROLS       %08x\n", val));
     5315                VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC,   &val);
     5316                Log(("VMX_VMCS_CTRL_PIN_EXEC   %08x\n", val));
     5317                VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC,   &val);
     5318                Log(("VMX_VMCS_CTRL_PROC_EXEC  %08x\n", val));
     5319                VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY,       &val);
     5320                Log(("VMX_VMCS_CTRL_ENTRY      %08x\n", val));
     5321                VMXReadVmcs(VMX_VMCS32_CTRL_EXIT,        &val);
     5322                Log(("VMX_VMCS_CTRL_EXIT       %08x\n", val));
    53235323
    53245324                VMXReadVmcs(VMX_VMCS_HOST_CR0,  &val);
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r45919 r45947  
    992992    val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    993993    zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    994     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT);
    995     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT);
    996     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI);
    997     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
     994    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT);
     995    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT);
     996    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI);
     997    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    998998
    999999    LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS   = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u));
    10001000    val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    10011001    zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
    1002     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT);
    1003     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING);
    1004     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
    1005     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT);
    1006     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT);
    1007     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT);
    1008     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT);
    1009     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT);
    1010     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
    1011     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT);
    1012     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT);
    1013     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
    1014     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT);
    1015     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    1016     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT);
    1017     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS);
    1018     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
    1019     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS);
    1020     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT);
    1021     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT);
     1002    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
     1003    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING);
     1004    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
     1005    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
     1006    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT);
     1007    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
     1008    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
     1009    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT);
     1010    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
     1011    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT);
     1012    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT);
     1013    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
     1014    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
     1015    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
     1016    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT);
     1017    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS);
     1018    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
     1019    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
     1020    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT);
     1021    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
    10221022    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL);
    10231023    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     
    10431043    val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    10441044    zap = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
    1045     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG);
    1046     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST);
    1047     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM);
    1048     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON);
    1049     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR);
    1050     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR);
    1051     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR);
     1045    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG);
     1046    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
     1047    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_ENTRY_SMM);
     1048    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON);
     1049    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR);
     1050    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR);
     1051    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR);
    10521052
    10531053    LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS        = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u));
    10541054    val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
    10551055    zap = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
    1056     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG);
    1057     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE);
    1058     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR);
    1059     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT);
    1060     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR);
    1061     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR);
    1062     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR);
    1063     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR);
    1064     VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER);
     1056    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_DEBUG);
     1057    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE);
     1058    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR);
     1059    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
     1060    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR);
     1061    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR);
     1062    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR);
     1063    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR);
     1064    VMX_REPORT_FEATURE(val, zap, VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER);
    10651065
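VMX_REPORT_FEATURE above logs, for every control bit, whether the CPU allows it and whether it is forced on; its definition is not part of this changeset. A hypothetical equivalent built on the same allowed1/disallowed0 masks (MY_REPORT_FEATURE, the printf logging, and the sample MSR value are stand-ins, not the VirtualBox macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical re-creation of a feature-report macro: 'val' is the allowed-one
       mask, 'zap' the must-be-one mask, 'feat' the control bit to report. */
    #define MY_REPORT_FEATURE(val, zap, feat) \
        do { \
            if ((val) & (feat)) \
                printf("HM:    %s%s\n", #feat, ((zap) & (feat)) ? " (must be set)" : ""); \
            else \
                printf("HM:    %s (not supported)\n", #feat); \
        } while (0)

    #define PIN_EXEC_EXT_INT_EXIT  (1u << 0)
    #define PIN_EXEC_NMI_EXIT      (1u << 3)

    int main(void)
    {
        uint64_t uPinCtlsMsr = 0x0000007f00000016ull;   /* example capability MSR value  */
        uint32_t val = (uint32_t)(uPinCtlsMsr >> 32);   /* allowed one                   */
        uint32_t zap = (uint32_t)uPinCtlsMsr;           /* must be one (disallowed zero) */
        MY_REPORT_FEATURE(val, zap, PIN_EXEC_EXT_INT_EXIT);
        MY_REPORT_FEATURE(val, zap, PIN_EXEC_NMI_EXIT);
        return 0;
    }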
    10661066    if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps)
     
    25732573                        || (pCtx->gs.Sel & X86_SEL_RPL)
    25742574                        || (pCtx->ss.Sel & X86_SEL_RPL))
    2575                     {                       
     2575                    {
    25762576                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
    25772577                        return false;
     
    25892589                        || !hmR3IsDataSelectorOkForVmx(&pCtx->gs)
    25902590                        || !hmR3IsStackSelectorOkForVmx(&pCtx->ss))
    2591                     {                       
     2591                    {
    25922592                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
    25932593                        return false;
     
    25982598                if (pCtx->gdtr.cbGdt)
    25992599                {
    2600                     if (pCtx->tr.Sel > pCtx->gdtr.cbGdt) 
     2600                    if (pCtx->tr.Sel > pCtx->gdtr.cbGdt)
    26012601                    {
    26022602                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
  • trunk/src/VBox/VMM/include/HMInternal.h

    r45919 r45947  
    582582#endif
    583583
    584         /** Current VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS. */
     584        /** Current VMX_VMCS32_CTRL_PIN_EXEC. */
    585585        uint32_t                    u32PinCtls;
    586         /** Current VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS. */
     586        /** Current VMX_VMCS32_CTRL_PROC_EXEC. */
    587587        uint32_t                    u32ProcCtls;
    588         /** Current VMX_VMCS32_CTRL_PROC_EXEC2_CONTROLS. */
     588        /** Current VMX_VMCS32_CTRL_PROC_EXEC2. */
    589589        uint32_t                    u32ProcCtls2;
    590         /** Current VMX_VMCS32_CTRL_EXIT_CONTROLS. */
     590        /** Current VMX_VMCS32_CTRL_EXIT. */
    591591        uint32_t                    u32ExitCtls;
    592         /** Current VMX_VMCS32_CTRL_ENTRY_CONTROLS. */
     592        /** Current VMX_VMCS32_CTRL_ENTRY. */
    593593        uint32_t                    u32EntryCtls;
    594594        /** Physical address of the virtual APIC page for TPR caching. */
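The HMInternal.h hunk only renames the VMCS symbols in the doxygen comments; the fields themselves are shadow copies of the five control words, kept so the code can modify and rewrite them without a VMREAD. A stripped-down illustration of that cache (field names follow the diff, the struct itself is not the real HMInternal.h layout):

    #include <stdint.h>

    /* Shadow copies of the VMCS control words, mirroring the renamed comments in
       the hunk above; illustrative only, not the real per-VCPU structure. */
    typedef struct VMXCTLSCACHE
    {
        uint32_t u32PinCtls;     /* current VMX_VMCS32_CTRL_PIN_EXEC   */
        uint32_t u32ProcCtls;    /* current VMX_VMCS32_CTRL_PROC_EXEC  */
        uint32_t u32ProcCtls2;   /* current VMX_VMCS32_CTRL_PROC_EXEC2 */
        uint32_t u32ExitCtls;    /* current VMX_VMCS32_CTRL_EXIT       */
        uint32_t u32EntryCtls;   /* current VMX_VMCS32_CTRL_ENTRY      */
    } VMXCTLSCACHE;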