Changeset 77453 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: Feb 25, 2019 9:38:45 AM
svn:sync-xref-src-repo-rev: 129024
Files: 1 edited
Legend: unmodified lines carry no prefix, removed lines are prefixed with -, added lines with +.
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r77425 → r77453

      /* Segment attribute bits 31:17 and 11:8 MBZ. */
      uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
-                                   | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
+                                   | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
+                                   | X86DESCATTR_UNUSABLE;
      /* LDTR. */
      {
…
      if (fUnrestrictedGuest)
          u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
-     if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
+     if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);

      /* CR0 MBZ bits. */
      uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-     if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
+     if (!(pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
…
      /* CR4 MB1 bits. */
      uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-     if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
+     if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);

      /* CR4 MBZ bits. */
      uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-     if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
+     if (!(pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
      }

      /* DEBUGCTL MSR. */
-     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
-         && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
+     if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+         || !(pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
…
      /* DR7. */
-     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
-         && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
+     if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+         || !(pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
…
      /* PAT MSR. */
-     if (   (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
-         && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
+     if (   !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
+         || CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
…
      /* We don't support RTM (Real-time Transactional Memory) yet. */
-     if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
+     if (!(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
…
      /* Validate the address. */
-     if (   (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
-         || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-         || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
+     if (   !(GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
+         && !(GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+         && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
+     { /* likely */ }
+     else
      {
          iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
…
      int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
                                       GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
-     if (RT_FAILURE(rc))
+     if (RT_SUCCESS(rc))
+     { /* likely */ }
+     else
      {
          iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
…
      /* CR0 MB1 bits. */
      uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
-     if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
+     if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);

      /* CR0 MBZ bits. */
      uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-     if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
+     if (!(pVmcs->u64HostCr0.u & ~u64Cr0Fixed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
      }
…
      /* CR4 MB1 bits. */
      uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-     if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
+     if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);

      /* CR4 MBZ bits. */
      uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-     if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
+     if (!(pVmcs->u64HostCr4.u & ~u64Cr4Fixed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
      }
…
      /* VM-entry controls. */
      VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
-     if (~pVmcs->u32EntryCtls & EntryCtls.n.allowed0)
+     if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);

-     if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
+     if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
…
      if (pVmcs->u32EntryMsrLoadCount)
      {
-         if (   (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-             || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
+         if (   !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+             && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
      }
…
      /* VM-exit controls. */
      VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
-     if (~pVmcs->u32ExitCtls & ExitCtls.n.allowed0)
+     if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);

-     if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
+     if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);

      /* Save preemption timer without activating it. */
-     if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
-         && (pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
+     if (   (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
+         || !(pVmcs->u32ProcCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
…
      if (pVmcs->u32ExitMsrStoreCount)
      {
-         if (   (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
-             || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
+         if (   !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
+             && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
      }
…
      if (pVmcs->u32ExitMsrLoadCount)
      {
-         if (   (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
-             || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
+         if (   !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
+             && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
      }
…
      {
          VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
-         if (~pVmcs->u32PinCtls & PinCtls.n.allowed0)
+         if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);

-         if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
+         if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
      }
…
      {
          VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
-         if (~pVmcs->u32ProcCtls & ProcCtls.n.allowed0)
+         if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);

-         if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
+         if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
      }
…
      {
          VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
-         if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0)
+         if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);

-         if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
+         if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
      }
…
      if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
      {
-         if (   (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
-             || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
+         if (   !(pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
+             && !(pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);

-         if (   (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
-             || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
+         if (   !(pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
+             && !(pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
      }
…
      {
          RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
-         if (   (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
-             || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
+         if (   !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
+             && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
…
          int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
                                           GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
      }
…
          /* Virtual-APIC page physical address. */
          RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-         if (   (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
-             || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
+         if (   !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
+             && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
…
          int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
                                           GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
…
      /* NMI exiting and virtual-NMIs. */
-     if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
-         && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+     if (   (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
+         || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);

      /* Virtual-NMIs and NMI-window exiting. */
-     if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
-         && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+     if (   (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
+         || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
…
          /* APIC-access physical address. */
          RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
-         if (   (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
-             || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
+         if (   !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
+             && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
…
          {
              RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
-             if (GCPhysVirtApic == GCPhysApicAccess)
+             if (GCPhysVirtApic != GCPhysApicAccess)
+             { /* likely */ }
+             else
                  IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
          }
…
                                                pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
                                                NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
      }

      /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
-     if (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
-         && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
+     if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
+         || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);

      /* Virtual-interrupt delivery requires external interrupt exiting. */
-     if (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
-         && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
+     if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
+         || (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
+     { /* likely */ }
+     else
          IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
…
          /* VMREAD-bitmap physical address. */
          RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
-         if (   (GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
-             || (GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
+         if (   !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
+             && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);

          /* VMWRITE-bitmap physical address. */
          RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmreadBitmap.u;
-         if (   (GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
-             || (GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
-             || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
+         if (   !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
+             && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+             && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
…
          int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
                                           GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
…
          rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
                                       GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
              IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
      }
…
      /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
       *        use block-by-STI here which is not quite correct. */
-     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-         && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
+     if (   !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+         || pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
+     { /* likely */ }
+     else
      {
          Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
…
      /* VMCS pointer in root mode. */
-     if (   IEM_VMX_IS_ROOT_MODE(pVCpu)
-         && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+     if (   !IEM_VMX_IS_ROOT_MODE(pVCpu)
+         || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+     { /* likely */ }
+     else
      {
          Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
…
      /* VMCS-link pointer in non-root mode. */
-     if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
-         && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+     if (   !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+         || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+     { /* likely */ }
+     else
      {
          Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
…
      /* VMCS pointer in root mode. */
-     if (   IEM_VMX_IS_ROOT_MODE(pVCpu)
-         && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+     if (   !IEM_VMX_IS_ROOT_MODE(pVCpu)
+         || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
+     { /* likely */ }
+     else
      {
          Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
…
      /* VMCS-link pointer in non-root mode. */
-     if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
-         && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+     if (   !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
+         || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
+     { /* likely */ }
+     else
      {
          Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
…
      /* Read-only VMCS field. */
      bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
-     if (   fIsFieldReadOnly
-         && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
+     if (   !fIsFieldReadOnly
+         || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
+     { /* likely */ }
+     else
      {
          Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
…
      RTGCPHYS GCPhysVmcs;
      VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
-     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+     { /* likely */ }
+     else
      {
          Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
…
      /* VMCS pointer alignment. */
-     if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
+     if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
+     { /* likely */ }
+     else
      {
          Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
…
      /* VMCS physical-address width limits. */
-     if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+     if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+     { /* likely */ }
+     else
      {
          Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
…
      /* VMCS is not the VMXON region. */
-     if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+     if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+     { /* likely */ }
+     else
      {
          Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
…
      /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
         restriction imposed by our implementation. */
-     if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+     if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+     { /* likely */ }
+     else
      {
          Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
…
      RTGCPHYS GCPhysVmcs;
      VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
-     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
…
      /* VMCS pointer alignment. */
-     if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
+     if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
…
      /* VMCS physical-address width limits. */
-     if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+     if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
…
      /* VMCS is not the VMXON region. */
-     if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+     if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
…
      /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
         restriction imposed by our implementation. */
-     if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+     if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
…
      VMXVMCSREVID VmcsRevId;
      int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
-     if (RT_FAILURE(rc))
+     if (RT_SUCCESS(rc))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: Failed to read revision identifier from VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
…
       * Verify the VMCS is not a shadow VMCS, if the VMCS shadowing feature is supported.
       */
-     if (   VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
-         || (   VmcsRevId.n.fIsShadowVmcs
-             && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
+     if (   VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID
+         && (   !VmcsRevId.n.fIsShadowVmcs
+             || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
+     { /* likely */ }
+     else
      {
          if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
…
      rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), GCPhysVmcs,
                                   sizeof(VMXVVMCS));
-     if (RT_FAILURE(rc))
+     if (RT_SUCCESS(rc))
+     { /* likely */ }
+     else
      {
          Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
…
          /* CR0 MB1 bits. */
          uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
-         if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
+         if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) == uCr0Fixed0)
+         { /* likely */ }
+         else
          {
              Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
…
          /* CR0 MBZ bits. */
          uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
-         if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
+         if (!(pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
…
          /* CR4 MB1 bits. */
          uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
-         if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
+         if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) == uCr4Fixed0)
+         { /* likely */ }
+         else
          {
              Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
…
          /* CR4 MBZ bits. */
          uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
-         if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
+         if (!(pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
…
          uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
          if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
-             != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+             == (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
…
          RTGCPHYS GCPhysVmxon;
          VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
-         if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
…
          /* VMXON region pointer alignment. */
-         if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
+         if (!(GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
…
          /* VMXON physical-address width limits. */
-         if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
+         if (!(GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
…
          /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
             restriction imposed by our implementation. */
-         if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+         if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
…
          VMXVMCSREVID VmcsRevId;
          int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
-         if (RT_FAILURE(rc))
+         if (RT_SUCCESS(rc))
+         { /* likely */ }
+         else
          {
              Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
…
          /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
-         if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
+         if (RT_UNLIKELY(VmcsRevId.u == VMX_V_VMCS_REVISION_ID))
+         { /* likely */ }
+         else
          {
              /* Revision ID mismatch. */
…
      /* Dual monitor treatment of SMIs and SMM. */
      uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
-     if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
+     if (!(fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID))
+     { /* likely */ }
+     else
      {
          iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
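Every hunk in this changeset applies the same mechanical rewrite: the condition that used to guard the failure path is inverted, the expected (valid) case falls through an empty { /* likely */ } statement block, and the rare failure case moves into the else branch. The diagnostics passed to IEM_VMX_VMENTRY_FAILED_RET and the Log strings are unchanged. The usual motivation for this idiom is to keep the hot path as straight-line fall-through code without relying on compiler-specific branch-hint builtins. Below is a minimal, self-contained sketch of the resulting shape; the function name and the concrete mask values are illustrative only and are not taken from the changeset, though the CR0 fixed-0/fixed-1 semantics mirror the checks shown above.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical validation routine showing the pattern used throughout the
       changeset: state the success condition first, let it fall through the
       empty block, and keep the failure handling in the else branch. */
    static int validateGuestCr0(uint64_t uGuestCr0, uint64_t fFixed0, uint64_t fFixed1)
    {
        /* Bits that must be 1 (fixed-0 MSR). */
        if ((uGuestCr0 & fFixed0) == fFixed0)
        { /* likely */ }
        else
        {
            fprintf(stderr, "CR0 fixed-0 check failed: %#llx\n", (unsigned long long)uGuestCr0);
            return -1;
        }

        /* Bits that must be 0 (fixed-1 MSR). */
        if (!(uGuestCr0 & ~fFixed1))
        { /* likely */ }
        else
        {
            fprintf(stderr, "CR0 fixed-1 check failed: %#llx\n", (unsigned long long)uGuestCr0);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* Illustrative values: PG, NE and PE must be set, no must-be-zero bits below bit 32. */
        uint64_t const fFixed0 = UINT64_C(0x80000021);
        uint64_t const fFixed1 = UINT64_C(0xFFFFFFFF);
        return validateGuestCr0(UINT64_C(0x80000021), fFixed0, fFixed1) == 0 ? 0 : 1;
    }

Structurally the transformation is behavior-preserving: negating the guard and swapping the taken/fall-through arms leaves the failure handling reachable under exactly the same conditions as before.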