VirtualBox

Changeset 77453 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Feb 25, 2019 9:38:45 AM (6 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 129024
Message:

VMM/IEM: Nested VMX: bugref:9180 Better branch prediction.

File: 1 edited

Legend: lines prefixed with '-' were removed (r77425), lines prefixed with '+' were added (r77453); all other lines are unchanged context.
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
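Every hunk below applies the same transformation: the guard of each validity check is inverted so that the common, successful case falls through an empty { /* likely */ } statement, while the VM-entry failure moves to the else arm, which static branch prediction (and compilers laying out code without profile data) treats as the cold path. A minimal, self-contained sketch of the before/after shapes — the mask value and function names here are illustrative only, not taken from the changeset:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative must-be-zero mask; not a real VMX constant. */
    #define EXAMPLE_MBZ_MASK  UINT64_C(0xffffffff00000000)

    /* Old shape: the branch is taken on the rare failure, which a
       compiler without profile data may lay out as the fall-through. */
    static int CheckBefore(uint64_t uValue)
    {
        if (uValue & EXAMPLE_MBZ_MASK)
            return -1;  /* VM-entry failure */
        return 0;
    }

    /* New shape used throughout r77453: the inverted condition makes the
       common case an empty fall-through, pushing the failure onto the
       statically not-taken 'else' arm. */
    static int CheckAfter(uint64_t uValue)
    {
        if (!(uValue & EXAMPLE_MBZ_MASK))
        { /* likely */ }
        else
            return -1;  /* VM-entry failure */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", CheckBefore(1), CheckAfter(1));                    /* 0 0 */
        printf("%d %d\n", CheckBefore(1ULL << 40), CheckAfter(1ULL << 40));  /* -1 -1 */
        return 0;
    }

The same inversion handles the multi-clause checks: a failure condition of the form (A || B) becomes the success condition (!A && !B) by De Morgan's laws, as seen in the address-validation hunks below.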

--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r77425)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r77453)

@@ -1772 +1772 @@
     /* Segment attribute bits 31:17 and 11:8 MBZ. */
     uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT  | X86DESCATTR_DPL | X86DESCATTR_P
-                                  | X86DESCATTR_AVL  | X86DESCATTR_L   | X86DESCATTR_D   | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
+                                  | X86DESCATTR_AVL  | X86DESCATTR_L   | X86DESCATTR_D   | X86DESCATTR_G
+                                  | X86DESCATTR_UNUSABLE;
     /* LDTR. */
     {

@@ -5113 +5114 @@
         if (fUnrestrictedGuest)
             u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
-        if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
+        if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);

         /* CR0 MBZ bits. */
         uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-        if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
+        if (!(pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);

@@ -5132 +5137 @@
         /* CR4 MB1 bits. */
         uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-        if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
+        if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);

         /* CR4 MBZ bits. */
         uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-        if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
+        if (!(pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
     }

@@ -5143 +5152 @@
     /* DEBUGCTL MSR. */
-    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
-        && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
+    if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+        || !(pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);

@@ -5176 +5187 @@
         /* DR7. */
-        if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
-            && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
+        if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+            || !(pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);

@@ -5192 +5205 @@
     /* PAT MSR. */
-    if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
-        && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
+    if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
+        ||  CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);

@@ -5905 +5920 @@
     /* We don't support RTM (Real-time Transactional Memory) yet. */
-    if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
+    if (!(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);

@@ -5924 +5941 @@
         /* Validate the address. */
-        if (   (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
+        if (   !(GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
+        { /* likely */ }
+        else
         {
             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);

@@ -5935 +5954 @@
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
                                          GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
         {
             iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);

@@ -6104 +6125 @@
         /* CR0 MB1 bits. */
         uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
-        if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
+        if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);

         /* CR0 MBZ bits. */
         uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-        if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
+        if (!(pVmcs->u64HostCr0.u & ~u64Cr0Fixed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
     }

@@ -6117 +6142 @@
         /* CR4 MB1 bits. */
         uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-        if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
+        if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);

         /* CR4 MBZ bits. */
         uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-        if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
+        if (!(pVmcs->u64HostCr4.u & ~u64Cr4Fixed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
     }

@@ -6294 +6323 @@
     /* VM-entry controls. */
     VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
-    if (~pVmcs->u32EntryCtls & EntryCtls.n.allowed0)
+    if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);

-    if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
+    if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);

@@ -6366 +6399 @@
     if (pVmcs->u32EntryMsrLoadCount)
     {
-        if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
+        if (   !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+            && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
     }

@@ -6396 +6431 @@
     /* VM-exit controls. */
     VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
-    if (~pVmcs->u32ExitCtls & ExitCtls.n.allowed0)
+    if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);

-    if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
+    if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);

     /* Save preemption timer without activating it. */
-    if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
-        && (pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
+    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
+        || !(pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);

@@ -6410 +6451 @@
     if (pVmcs->u32ExitMsrStoreCount)
     {
-        if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
+        if (   !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
+            && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
     }

@@ -6419 +6462 @@
     if (pVmcs->u32ExitMsrLoadCount)
     {
-        if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-            || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
+        if (   !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+            && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
     }

@@ -6450 +6495 @@
     {
         VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
-        if (~pVmcs->u32PinCtls & PinCtls.n.allowed0)
+        if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);

-        if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
+        if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
     }

@@ -6460 +6509 @@
     {
         VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
-        if (~pVmcs->u32ProcCtls & ProcCtls.n.allowed0)
+        if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);

-        if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
+        if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
     }

@@ -6471 +6524 @@
     {
         VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
-        if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0)
+        if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);

-        if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
+        if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
     }

@@ -6489 +6546 @@
     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
     {
-        if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
+        if (   !(pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
+            && !(pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);

-        if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
-            || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
+        if (   !(pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
+            && !(pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
     }

@@ -6504 +6565 @@
     {
         RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
-        if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
+        if (   !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);

@@ -6513 +6576 @@
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
                                          GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
     }

@@ -6522 +6587 @@
         /* Virtual-APIC page physical address. */
         RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-        if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
+        if (   !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);

@@ -6531 +6598 @@
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
                                          GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);

@@ -6566 +6635 @@
     /* NMI exiting and virtual-NMIs. */
-    if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
-        &&  (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
+        || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);

     /* Virtual-NMIs and NMI-window exiting. */
-    if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
-        && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+    if (    (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
+        || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);

@@ -6579 +6652 @@
         /* APIC-access physical address. */
         RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
-        if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
-            || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
+        if (   !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);

@@ -6594 +6669 @@
         {
             RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-            if (GCPhysVirtApic == GCPhysApicAccess)
+            if (GCPhysVirtApic != GCPhysApicAccess)
+            { /* likely */ }
+            else
                 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
         }

@@ -6614 +6691 @@
                                             pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
                                             NIL_RTR0PTR /* pvUserR0 */,  NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
     }

     /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
-    if (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
-        && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
+    if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
+        || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);

     /* Virtual-interrupt delivery requires external interrupt exiting. */
-    if (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
-        && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
+    if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
+        ||  (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
+    { /* likely */ }
+    else
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);

@@ -6648 +6731 @@
         /* VMREAD-bitmap physical address. */
         RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
-        if (   ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
+        if (   !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);

         /* VMWRITE-bitmap physical address. */
         RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
-        if (   ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
-            || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
+        if (   !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
+            && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+            &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);

@@ -6664 +6751 @@
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
                                          GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);

@@ -6671 +6760 @@
         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
                                      GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
     }

@@ -7370 +7461 @@
     /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
      *        use block-by-STI here which is not quite correct. */
-    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-        && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
+    if (   !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+        ||  pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));

@@ -7661 +7754 @@
     /* VMCS pointer in root mode. */
-    if (    IEM_VMX_IS_ROOT_MODE(pVCpu)
-        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    if (   !IEM_VMX_IS_ROOT_MODE(pVCpu)
+        ||  IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));

@@ -7672 +7767 @@
     /* VMCS-link pointer in non-root mode. */
-    if (    IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
-        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    if (   !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+        ||  IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));

@@ -7884 +7981 @@
     /* VMCS pointer in root mode. */
-    if (    IEM_VMX_IS_ROOT_MODE(pVCpu)
-        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    if (   !IEM_VMX_IS_ROOT_MODE(pVCpu)
+        ||  IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));

@@ -7895 +7994 @@
     /* VMCS-link pointer in non-root mode. */
-    if (    IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
-        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    if (   !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+        ||  IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));

@@ -7946 +8047 @@
     /* Read-only VMCS field. */
     bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
-    if (   fIsFieldReadOnly
-        && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
+    if (   !fIsFieldReadOnly
+        ||  IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
+    { /* likely */ }
+    else
     {
         Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));

@@ -8038 +8141 @@
     RTGCPHYS GCPhysVmcs;
     VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
-    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    { /* likely */ }
+    else
     {
         Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));

@@ -8047 +8152 @@
     /* VMCS pointer alignment. */
-    if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
+    if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
+    { /* likely */ }
+    else
     {
         Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));

@@ -8057 +8164 @@
     /* VMCS physical-address width limits. */
-    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+    if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+    { /* likely */ }
+    else
     {
         Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));

@@ -8067 +8176 @@
     /* VMCS is not the VMXON region. */
-    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+    if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+    { /* likely */ }
+    else
     {
         Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));

@@ -8077 +8188 @@
     /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
        restriction imposed by our implementation. */
-    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+    if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+    { /* likely */ }
+    else
     {
         Log(("vmclear: VMCS not normal memory -> VMFail()\n"));

@@ -8211 +8324 @@
     RTGCPHYS GCPhysVmcs;
     VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
-    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));

@@ -8220 +8335 @@
     /* VMCS pointer alignment. */
-    if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
+    if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));

@@ -8230 +8347 @@
     /* VMCS physical-address width limits. */
-    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+    if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));

@@ -8240 +8359 @@
     /* VMCS is not the VMXON region. */
-    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+    if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));

@@ -8250 +8371 @@
     /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
        restriction imposed by our implementation. */
-    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+    if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));

@@ -8262 +8385 @@
     VMXVMCSREVID VmcsRevId;
     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
-    if (RT_FAILURE(rc))
+    if (RT_SUCCESS(rc))
+    { /* likely */ }
+    else
     {
         Log(("vmptrld: Failed to read revision identifier from VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));

@@ -8273 +8398 @@
      * Verify the VMCS is not a shadow VMCS, if the VMCS shadowing feature is supported.
      */
-    if (   VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
-        || (   VmcsRevId.n.fIsShadowVmcs
-            && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
+    if (   VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID
+        && (   !VmcsRevId.n.fIsShadowVmcs
+            ||  IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
+    { /* likely */ }
+    else
     {
         if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)

@@ -8319 +8446 @@
         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), GCPhysVmcs,
                                      sizeof(VMXVVMCS));
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
         {
             Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));

@@ -8379 +8508 @@
             /* CR0 MB1 bits. */
             uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
-            if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
+            if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) == uCr0Fixed0)
+            { /* likely */ }
+            else
             {
                 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));

@@ -8388 +8519 @@
             /* CR0 MBZ bits. */
             uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-            if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
+            if (!(pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1))
+            { /* likely */ }
+            else
             {
                 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));

@@ -8400 +8533 @@
             /* CR4 MB1 bits. */
             uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-            if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
+            if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) == uCr4Fixed0)
+            { /* likely */ }
+            else
             {
                 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));

@@ -8409 +8544 @@
             /* CR4 MBZ bits. */
             uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-            if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
+            if (!(pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1))
+            { /* likely */ }
+            else
             {
                 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));

@@ -8420 +8557 @@
         uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
         if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
-                        != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+                        == (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));

@@ -8430 +8569 @@
         RTGCPHYS GCPhysVmxon;
         VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
-        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));

@@ -8439 +8580 @@
         /* VMXON region pointer alignment. */
-        if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
+        if (!(GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));

@@ -8449 +8592 @@
         /* VMXON physical-address width limits. */
-        if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+        if (!(GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));

@@ -8459 +8604 @@
         /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
           restriction imposed by our implementation. */
-        if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+        if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));

@@ -8471 +8618 @@
         VMXVMCSREVID VmcsRevId;
         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
-        if (RT_FAILURE(rc))
+        if (RT_SUCCESS(rc))
+        { /* likely */ }
+        else
         {
             Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
@@ -8480 +8629 @@
         /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
-        if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
+        if (RT_LIKELY(VmcsRevId.u == VMX_V_VMCS_REVISION_ID))
+        { /* likely */ }
+        else
         {
             /* Revision ID mismatch. */
@@ -8565 +8716 @@
     /* Dual monitor treatment of SMIs and SMM. */
     uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
-    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
+    if (!(fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID))
+    { /* likely */ }
+    else
     {
         iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);