VirtualBox

Ignore:
Timestamp:
Dec 26, 2018 3:49:56 AM (6 years ago)
Author:
vboxsync
Message:

VMM/HM: Made the vmx/svm VCPU state a union; saves some space now that the SVM bits have grown with nested-SVM, plus other cleanup.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r76477 r76482  
    11191119 *
    11201120 * @returns VBox status code.
    1121  * @param   pHostCpu        Pointer to the global CPU info struct.
     1121 * @param   pHostCpu        The HM physical-CPU structure.
    11221122 * @param   pVM             The cross context VM structure.  Can be
    11231123 *                          NULL after a host resume operation.
     
    11301130 * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs.
    11311131 */
    1132 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
     1132VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    11331133                              PCSUPHWVIRTMSRS pHwvirtMsrs)
    11341134{
     
    11691169 *
    11701170 * @returns VBox status code.
    1171  * @param   pHostCpu        Pointer to the global CPU info struct.
    11721171 * @param   pvCpuPage       Pointer to the VMXON region.
    11731172 * @param   HCPhysCpuPage   Physical address of the VMXON region.
     
    11761175 *          similar was used to enable VT-x on the host.
    11771176 */
    1178 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    1179 {
    1180     RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
     1177VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     1178{
     1179    RT_NOREF2(pvCpuPage, HCPhysCpuPage);
    11811180
    11821181    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    14111410static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
    14121411{
    1413     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1414     uint32_t   cMsrs     = pVCpu->hm.s.vmx.cMsrs;
     1412    PVMXAUTOMSR    pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1413    uint32_t const cMsrs     = pVCpu->hm.s.vmx.cMsrs;
    14151414
    14161415    for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
     
    14351434    PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    14361435    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1437     uint32_t    cMsrs    = pVCpu->hm.s.vmx.cMsrs;
     1436    uint32_t const cMsrs  = pVCpu->hm.s.vmx.cMsrs;
    14381437
    14391438    for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
     
    18941893 * case where neither EPT nor VPID is supported by the CPU.
    18951894 *
    1896  * @param   pVCpu           The cross context virtual CPU structure.
    1897  * @param   pCpu            Pointer to the global HM struct.
     1895 * @param   pHostCpu    The HM physical-CPU structure.
     1896 * @param   pVCpu       The cross context virtual CPU structure.
    18981897 *
    18991898 * @remarks Called with interrupts disabled.
    19001899 */
    1901 static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     1900static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
    19021901{
    19031902    AssertPtr(pVCpu);
    1904     AssertPtr(pCpu);
     1903    AssertPtr(pHostCpu);
    19051904
    19061905    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    19071906
    1908     Assert(pCpu->idCpu != NIL_RTCPUID);
    1909     pVCpu->hm.s.idLastCpu           = pCpu->idCpu;
    1910     pVCpu->hm.s.cTlbFlushes         = pCpu->cTlbFlushes;
     1907    Assert(pHostCpu->idCpu != NIL_RTCPUID);
     1908    pVCpu->hm.s.idLastCpu           = pHostCpu->idCpu;
     1909    pVCpu->hm.s.cTlbFlushes         = pHostCpu->cTlbFlushes;
    19111910    pVCpu->hm.s.fForceTLBFlush      = false;
    19121911    return;
     
    19171916 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
    19181917 *
    1919  * @param    pVCpu          The cross context virtual CPU structure.
    1920  * @param    pCpu           Pointer to the global HM CPU struct.
     1918 * @param   pHostCpu    The HM physical-CPU structure.
     1919 * @param   pVCpu       The cross context virtual CPU structure.
    19211920 *
    19221921 * @remarks  All references to "ASID" in this function pertains to "VPID" in Intel's
     
    19261925 * @remarks  Called with interrupts disabled.
    19271926 */
    1928 static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     1927static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
    19291928{
    19301929#ifdef VBOX_WITH_STATISTICS
     
    19401939#endif
    19411940
    1942     AssertPtr(pCpu);
    19431941    AssertPtr(pVCpu);
    1944     Assert(pCpu->idCpu != NIL_RTCPUID);
     1942    AssertPtr(pHostCpu);
     1943    Assert(pHostCpu->idCpu != NIL_RTCPUID);
    19451944
    19461945    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    19551954     * cannot reuse the current ASID anymore.
    19561955     */
    1957     if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
    1958         || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    1959     {
    1960         ++pCpu->uCurrentAsid;
    1961         if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    1962         {
    1963             pCpu->uCurrentAsid = 1;              /* Wraparound to 1; host uses 0. */
    1964             pCpu->cTlbFlushes++;                 /* All VCPUs that run on this host CPU must use a new VPID. */
    1965             pCpu->fFlushAsidBeforeUse = true;    /* All VCPUs that run on this host CPU must flush their new VPID before use. */
    1966         }
    1967 
    1968         pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
    1969         pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
    1970         pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
     1956    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
     1957        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     1958    {
     1959        ++pHostCpu->uCurrentAsid;
     1960        if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
     1961        {
     1962            pHostCpu->uCurrentAsid = 1;            /* Wraparound to 1; host uses 0. */
     1963            pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
     1964            pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
     1965        }
     1966
     1967        pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
     1968        pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
     1969        pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;
    19711970
    19721971        /*
     
    19981997    HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
    19991998
    2000     Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
    2001     Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
    2002     AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
    2003               ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    2004     AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
    2005               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
    2006                pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
     1999    Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
     2000    Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
     2001    AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
     2002              ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
     2003    AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
     2004              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
     2005               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
    20072006    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
    2008               ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
     2007              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
    20092008
    20102009    /* Update VMCS with the VPID. */
     
    20192018 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
    20202019 *
    2021  * @returns VBox status code.
     2020 * @param   pHostCpu    The HM physical-CPU structure.
    20222021 * @param   pVCpu       The cross context virtual CPU structure.
    2023  * @param   pCpu        Pointer to the global HM CPU struct.
    20242022 *
    20252023 * @remarks Called with interrupts disabled.
    20262024 */
    2027 static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     2025static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
    20282026{
    20292027    AssertPtr(pVCpu);
    2030     AssertPtr(pCpu);
    2031     Assert(pCpu->idCpu != NIL_RTCPUID);
     2028    AssertPtr(pHostCpu);
     2029    Assert(pHostCpu->idCpu != NIL_RTCPUID);
    20322030    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
    20332031    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
     
    20372035     * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
    20382036     */
    2039     if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
    2040         || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     2037    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
     2038        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
    20412039    {
    20422040        pVCpu->hm.s.fForceTLBFlush = true;
     
    20512049    }
    20522050
    2053     pVCpu->hm.s.idLastCpu   = pCpu->idCpu;
    2054     pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
     2051    pVCpu->hm.s.idLastCpu   = pHostCpu->idCpu;
     2052    pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
    20552053
    20562054    if (pVCpu->hm.s.fForceTLBFlush)
     
    20652063 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
    20662064 *
    2067  * @returns VBox status code.
     2065 * @param   pHostCpu    The HM physical-CPU structure.
    20682066 * @param   pVCpu       The cross context virtual CPU structure.
    2069  * @param   pCpu        Pointer to the global HM CPU struct.
    20702067 *
    20712068 * @remarks Called with interrupts disabled.
    20722069 */
    2073 static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     2070static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
    20742071{
    20752072    AssertPtr(pVCpu);
    2076     AssertPtr(pCpu);
    2077     Assert(pCpu->idCpu != NIL_RTCPUID);
     2073    AssertPtr(pHostCpu);
     2074    Assert(pHostCpu->idCpu != NIL_RTCPUID);
    20782075    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
    20792076    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));
     
    20852082     * cannot reuse the current ASID anymore.
    20862083     */
    2087     if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
    2088         || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     2084    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
     2085        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
    20892086    {
    20902087        pVCpu->hm.s.fForceTLBFlush = true;
     
    20982095         * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
    20992096         * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
    2100          * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
     2097         * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
    21012098         * include fExplicitFlush's too) - an obscure corner case.
    21022099         */
     
    21062103
    21072104    PVM pVM = pVCpu->CTX_SUFF(pVM);
    2108     pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     2105    pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
    21092106    if (pVCpu->hm.s.fForceTLBFlush)
    21102107    {
    2111         ++pCpu->uCurrentAsid;
    2112         if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    2113         {
    2114             pCpu->uCurrentAsid        = 1;       /* Wraparound to 1; host uses 0 */
    2115             pCpu->cTlbFlushes++;                 /* All VCPUs that run on this host CPU must use a new VPID. */
    2116             pCpu->fFlushAsidBeforeUse = true;    /* All VCPUs that run on this host CPU must flush their new VPID before use. */
     2108        ++pHostCpu->uCurrentAsid;
     2109        if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
     2110        {
     2111            pHostCpu->uCurrentAsid        = 1;     /* Wraparound to 1; host uses 0 */
     2112            pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
     2113            pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
    21172114        }
    21182115
    21192116        pVCpu->hm.s.fForceTLBFlush = false;
    2120         pVCpu->hm.s.cTlbFlushes    = pCpu->cTlbFlushes;
    2121         pVCpu->hm.s.uCurrentAsid   = pCpu->uCurrentAsid;
    2122         if (pCpu->fFlushAsidBeforeUse)
     2117        pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
     2118        pVCpu->hm.s.uCurrentAsid   = pHostCpu->uCurrentAsid;
     2119        if (pHostCpu->fFlushAsidBeforeUse)
    21232120        {
    21242121            if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
     
    21272124            {
    21282125                hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
    2129                 pCpu->fFlushAsidBeforeUse = false;
     2126                pHostCpu->fFlushAsidBeforeUse = false;
    21302127            }
    21312128            else
     
    21372134    }
    21382135
    2139     AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
    2140               ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    2141     AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
    2142               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
    2143                pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
     2136    AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
     2137              ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
     2138    AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
     2139              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
     2140               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
    21442141    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
    2145               ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
     2142              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
    21462143
    21472144    int rc  = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
     
    21532150 * Flushes the guest TLB entry based on CPU capabilities.
    21542151 *
    2155  * @param   pVCpu     The cross context virtual CPU structure.
    2156  * @param   pCpu      Pointer to the global HM CPU struct.
    2157  */
    2158 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
     2152 * @param   pHostCpu    The HM physical-CPU structure.
     2153 * @param   pVCpu       The cross context virtual CPU structure.
     2154 *
     2155 * @remarks Called with interrupts disabled.
     2156 */
     2157DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
    21592158{
    21602159#ifdef HMVMX_ALWAYS_FLUSH_TLB
     
    21642163    switch (pVM->hm.s.vmx.enmTlbFlushType)
    21652164    {
    2166         case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
    2167         case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu);  break;
    2168         case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
    2169         case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
     2165        case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu); break;
     2166        case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu);  break;
     2167        case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
     2168        case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
    21702169        default:
    21712170            AssertMsgFailed(("Invalid flush-tag function identifier\n"));
     
    28212820                                    hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    28222821
    2823         pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     2822        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    28242823
    28252824        hmR0VmxUpdateErrorRecord(pVCpu, rc);
     
    49824981     * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
    49834982     */
    4984     bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     4983    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
    49854984    /** @todo Add stats for resume vs launch. */
    49864985    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    52845283#endif
    52855284
    5286     PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
    5287     RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
     5285    PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
     5286    RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj;
    52885287
    52895288    /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    52905289    VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    5291     pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     5290    pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    52925291
    52935292    /* Leave VMX Root Mode. */
     
    53235322    rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    53245323    AssertRC(rc2);
    5325     pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     5324    pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
    53265325    Assert(!(ASMGetFlags() & X86_EFL_IF));
    53275326    ASMSetFlags(fOldEFlags);
     
    53455344    NOREF(fResume);
    53465345
    5347     PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
    5348     RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
     5346    PCHMPHYSCPU    pHostCpu      = hmR0GetCurrentCpu();
     5347    RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
    53495348
    53505349#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    72117210     *  context.
    72127211     */
    7213     if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
     7212    if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
    72147213    {
    72157214        int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    72167215        AssertRCReturn(rc, rc);
    72177216
    7218         pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     7217        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    72197218        Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
    72207219    }
    7221     Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
     7220    Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
    72227221    NOREF(idCpu);
    72237222
     
    74137412        pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
    74147413        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    7415         if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
     7414        if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
    74167415        {
    74177416            VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    7418             pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     7417            pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    74197418        }
    74207419
     
    80278026 * @returns VBox status code.
    80288027 * @param   pVCpu       The cross context virtual CPU structure.
    8029  * @param   pHostCpu    Pointer to the global CPU info struct.
    8030  */
    8031 VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
     8028 */
     8029VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu)
    80328030{
    80338031    AssertPtr(pVCpu);
    80348032    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
    80358033    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    8036     RT_NOREF(pHostCpu);
    80378034
    80388035    LogFlowFunc(("pVCpu=%p\n", pVCpu));
     
    80538050     * Load the VCPU's VMCS as the current (and active) one.
    80548051     */
    8055     Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
     8052    Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR);
    80568053    int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    8057     if (RT_FAILURE(rc))
    8058         return rc;
    8059 
    8060     pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
    8061     pVCpu->hm.s.fLeaveDone = false;
    8062     Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    8063 
    8064     return VINF_SUCCESS;
     8054    if (RT_SUCCESS(rc))
     8055    {
     8056        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     8057        pVCpu->hm.s.fLeaveDone = false;
     8058        Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
     8059    }
     8060    return rc;
    80658061}
    80668062
     
    81318127
    81328128            /* Load the active VMCS as the current one. */
    8133             if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
     8129            if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR)
    81348130            {
    81358131                rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    81368132                AssertRC(rc); NOREF(rc);
    8137                 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     8133                pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
    81388134                Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    81398135            }
     
    86938689        pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];
    86948690
    8695     PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
    8696     RTCPUID  idCurrentCpu = pCpu->idCpu;
     8691    PHMPHYSCPU pHostCpu    = hmR0GetCurrentCpu();
     8692    RTCPUID    idCurrentCpu = pHostCpu->idCpu;
    86978693    if (   pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
    86988694        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     
    87038699
    87048700    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
    8705     hmR0VmxFlushTaggedTlb(pVCpu, pCpu);                         /* Invalidate the appropriate guest entries from the TLB. */
     8701    hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu);                     /* Invalidate the appropriate guest entries from the TLB. */
    87068702    Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
    87078703    pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Update the error reporting info. with the current host CPU. */
     
    88008796#endif
    88018797#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    8802     /* The 64-on-32 switcher maintains uVmcsState on its own and we need to leave it alone here. */
     8798    /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */
    88038799    if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
    8804         pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
     8800        pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
    88058801#else
    8806     pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
     8802    pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
    88078803#endif
    88088804#ifdef VBOX_STRICT
     
    92749270        } else do { } while (0)
    92759271
    9276     SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);      /* unconditional */
    9277     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION);    /* unconditional */
    9278     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG);    /* unconditional (unless #VE) */
    9279     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);      /* feature dependent, nothing to enable here */
    9280     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);       /* feature dependent, nothing to enable here */
    9281 
    9282     SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);            /* unconditional */
     9272    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);   /* unconditional */
     9273    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION); /* unconditional */
     9274    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
     9275    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);   /* feature dependent, nothing to enable here */
     9276    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);    /* feature dependent, nothing to enable here */
     9277
     9278    SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);         /* unconditional */
    92839279    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,              VMX_EXIT_CPUID);
    9284     SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);           /* unconditional */
     9280    SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);        /* unconditional */
    92859281    SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC,             VMX_EXIT_GETSEC);
    92869282    SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT,               VMX_EXIT_HLT,      VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
    92879283    SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,               VMX_EXIT_HLT);
    9288     SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);             /* unconditional */
     9284    SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);          /* unconditional */
    92899285    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,               VMX_EXIT_INVD);
    92909286    SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG,             VMX_EXIT_INVLPG,   VMX_PROC_CTLS_INVLPG_EXIT);
     
    92949290    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC,              VMX_EXIT_RDTSC,    VMX_PROC_CTLS_RDTSC_EXIT);
    92959291    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,              VMX_EXIT_RDTSC);
    9296     SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);              /* unconditional */
     9292    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);           /* unconditional */
    92979293    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,                VMX_EXIT_RSM);
    9298     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);           /* unconditional */
     9294    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);        /* unconditional */
    92999295    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,           VMX_EXIT_VMCALL);
    9300     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);          /* unconditional */
     9296    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);       /* unconditional */
    93019297    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);
    9302     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);         /* unconditional */
     9298    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);      /* unconditional */
    93039299    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);
    9304     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);          /* unconditional */
     9300    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);       /* unconditional */
    93059301    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);
    9306     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);          /* unconditional */
     9302    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);       /* unconditional */
    93079303    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST,        VMX_EXIT_VMPTRST);
    9308     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);           /* unconditional */
     9304    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);        /* unconditional */
    93099305    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD,         VMX_EXIT_VMREAD);
    9310     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);         /* unconditional */
     9306    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);      /* unconditional */
    93119307    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME,       VMX_EXIT_VMRESUME);
    9312     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);          /* unconditional */
     9308    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);       /* unconditional */
    93139309    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE,        VMX_EXIT_VMWRITE);
    9314     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);           /* unconditional */
     9310    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);        /* unconditional */
    93159311    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF,         VMX_EXIT_VMXOFF);
    9316     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);            /* unconditional */
     9312    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);         /* unconditional */
    93179313    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON,          VMX_EXIT_VMXON);
    93189314
     
    94009396    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,                VMX_EXIT_LDTR_TR_ACCESS);
    94019397
    9402     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);           /* unconditional */
     9398    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);        /* unconditional */
    94039399    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT,         VMX_EXIT_INVEPT);
    94049400    SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP,             VMX_EXIT_RDTSCP,   VMX_PROC_CTLS_RDTSC_EXIT);
    94059401    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,             VMX_EXIT_RDTSCP);
    9406     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);          /* unconditional */
     9402    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);       /* unconditional */
    94079403    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID,        VMX_EXIT_INVVPID);
    94089404    SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD,             VMX_EXIT_WBINVD,   VMX_PROC_CTLS2_WBINVD_EXIT);
    94099405    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,             VMX_EXIT_WBINVD);
    9410     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);           /* unconditional */
     9406    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);        /* unconditional */
    94119407    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,             VMX_EXIT_XSETBV);
    94129408    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND,             VMX_EXIT_RDRAND,   VMX_PROC_CTLS2_RDRAND_EXIT);
     
    94149410    SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID,        VMX_EXIT_INVPCID,  VMX_PROC_CTLS_INVLPG_EXIT);
    94159411    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID,        VMX_EXIT_INVPCID);
    9416     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);           /* unconditional for the current setup */
     9412    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);        /* unconditional for the current setup */
    94179413    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC,         VMX_EXIT_VMFUNC);
    94189414    SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED,             VMX_EXIT_RDSEED,   VMX_PROC_CTLS2_RDSEED_EXIT);
    94199415    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED,             VMX_EXIT_RDSEED);
    9420     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);           /* unconditional (enabled by host, guest cfg) */
     9416    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);        /* unconditional (enabled by host, guest cfg) */
    94219417    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES,              VMX_EXIT_XSAVES);
    9422     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);          /* unconditional (enabled by host, guest cfg) */
     9418    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);       /* unconditional (enabled by host, guest cfg) */
    94239419    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS,            VMX_EXIT_XRSTORS);
    94249420
     
    1196711963    }
    1196811964    else
    11969         AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     11965        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
    1197011966
    1197111967    return rcStrict;
     
    1211012106    }
    1211112107    else
    12112         AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     12108        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
    1211312109
    1211412110    return rcStrict;
     
    1323213228        if (   !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
    1323313229            || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
    13234             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    13235                                    pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13230            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     13231                                   pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1323613232        else
    1323713233            rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette