- Timestamp:
- Aug 9, 2013 12:57:57 PM (11 years ago)
- Location:
- trunk
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/hm_vmx.h
r47445 r47635 93 93 AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 40); 94 94 AssertCompileSize(VMXRESTOREHOST, 56); 95 96 /** @name VMX HM-error codes for VERR_VMX_INVALID_GUEST_STATE. 97 * @{ 98 */ 99 /** An error occurred while checking invalid-guest-state. */ 100 #define VMX_IGS_ERROR 0 101 /** The invalid guest-state checks did not find any reason why. */ 102 #define VMX_IGS_REASON_NOT_FOUND 1 103 /** CR0 fixed1 bits invalid. */ 104 #define VMX_IGS_CR0_FIXED1 2 105 /** CR0 fixed0 bits invalid. */ 106 #define VMX_IGS_CR0_FIXED0 3 107 /** CR0.PG and CR0.PE invalid VT-x/host combination. */ 108 #define VMX_IGS_CR0_PG_PE_COMBO 4 109 /** CR4 fixed1 bits invalid. */ 110 #define VMX_IGS_CR4_FIXED1 5 111 /** CR4 fixed0 bits invalid. */ 112 #define VMX_IGS_CR4_FIXED0 6 113 /** Reserved bits in VMCS' DEBUGCTL MSR field not set to 0 when 114 * VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG is used. */ 115 #define VMX_IGS_DEBUGCTL_MSR_RESERVED 7 116 /** CR0.PG not set for long-mode when not using unrestricted guest. */ 117 #define VMX_IGS_CR0_PG_LONGMODE 8 118 /** CR4.PAE not set for long-mode guest when not using unrestricted guest. */ 119 #define VMX_IGS_CR4_PAE_LONGMODE 9 120 /** CR4.PCIDE set for 32-bit guest. */ 121 #define VMX_IGS_CR4_PCIDE 10 122 /** VMCS' DR7 reserved bits not set to 0. */ 123 #define VMX_IGS_DR7_RESERVED 11 124 /** VMCS' PERF_GLOBAL MSR reserved bits not set to 0. */ 125 #define VMX_IGS_PERF_GLOBAL_MSR_RESERVED 12 126 /** VMCS' EFER MSR reserved bits not set to 0. */ 127 #define VMX_IGS_EFER_MSR_RESERVED 13 128 /** VMCS' EFER MSR.LMA does not match the IA32e mode guest control. */ 129 #define VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH 14 130 /** VMCS' EFER MSR.LMA does not match CR0.PG of the guest when not using 131 * unrestricted guest. */ 132 #define VMX_IGS_EFER_LMA_PG_MISMATCH 15 133 /** CS.Attr.P bit invalid. */ 134 #define VMX_IGS_CS_ATTR_P_INVALID 16 135 /** CS.Attr reserved bits not set to 0. 
*/ 136 #define VMX_IGS_CS_ATTR_RESERVED 17 137 /** CS.Attr.G bit invalid. */ 138 #define VMX_IGS_CS_ATTR_G_INVALID 18 139 /** CS is unusable. */ 140 #define VMX_IGS_CS_ATTR_UNUSABLE 19 141 /** CS and SS DPL unequal. */ 142 #define VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL 20 143 /** CS and SS DPL mismatch. */ 144 #define VMX_IGS_CS_SS_ATTR_DPL_MISMATCH 21 145 /** CS Attr.Type invalid. */ 146 #define VMX_IGS_CS_ATTR_TYPE_INVALID 22 147 /** CS and SS RPL unequal. */ 148 #define VMX_IGS_SS_CS_RPL_UNEQUAL 23 149 /** SS.Attr.DPL and SS RPL unequal. */ 150 #define VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL 24 151 /** SS.Attr.DPL invalid for segment type. */ 152 #define VMX_IGS_SS_ATTR_DPL_INVALID 25 153 /** SS.Attr.Type invalid. */ 154 #define VMX_IGS_SS_ATTR_TYPE_INVALID 26 155 /** SS.Attr.P bit invalid. */ 156 #define VMX_IGS_SS_ATTR_P_INVALID 27 157 /** SS.Attr reserved bits not set to 0. */ 158 #define VMX_IGS_SS_ATTR_RESERVED 28 159 /** SS.Attr.G bit invalid. */ 160 #define VMX_IGS_SS_ATTR_G_INVALID 29 161 /** DS.Attr.A bit invalid. */ 162 #define VMX_IGS_DS_ATTR_A_INVALID 30 163 /** DS.Attr.P bit invalid. */ 164 #define VMX_IGS_DS_ATTR_P_INVALID 31 165 /** DS.Attr.DPL and DS RPL unequal. */ 166 #define VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL 32 167 /** DS.Attr reserved bits not set to 0. */ 168 #define VMX_IGS_DS_ATTR_RESERVED 33 169 /** DS.Attr.G bit invalid. */ 170 #define VMX_IGS_DS_ATTR_G_INVALID 34 171 /** DS.Attr.Type invalid. */ 172 #define VMX_IGS_DS_ATTR_TYPE_INVALID 35 173 /** ES.Attr.A bit invalid. */ 174 #define VMX_IGS_ES_ATTR_A_INVALID 36 175 /** ES.Attr.P bit invalid. */ 176 #define VMX_IGS_ES_ATTR_P_INVALID 37 177 /** ES.Attr.DPL and DS RPL unequal. */ 178 #define VMX_IGS_ES_ATTR_DPL_RPL_UNEQUAL 38 179 /** ES.Attr reserved bits not set to 0. */ 180 #define VMX_IGS_ES_ATTR_RESERVED 39 181 /** ES.Attr.G bit invalid. */ 182 #define VMX_IGS_ES_ATTR_G_INVALID 40 183 /** ES.Attr.Type invalid. */ 184 #define VMX_IGS_ES_ATTR_TYPE_INVALID 41 185 /** FS.Attr.A bit invalid. 
*/ 186 #define VMX_IGS_FS_ATTR_A_INVALID 42 187 /** FS.Attr.P bit invalid. */ 188 #define VMX_IGS_FS_ATTR_P_INVALID 43 189 /** FS.Attr.DPL and DS RPL unequal. */ 190 #define VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL 44 191 /** FS.Attr reserved bits not set to 0. */ 192 #define VMX_IGS_FS_ATTR_RESERVED 45 193 /** FS.Attr.G bit invalid. */ 194 #define VMX_IGS_FS_ATTR_G_INVALID 46 195 /** FS.Attr.Type invalid. */ 196 #define VMX_IGS_FS_ATTR_TYPE_INVALID 47 197 /** GS.Attr.A bit invalid. */ 198 #define VMX_IGS_GS_ATTR_A_INVALID 48 199 /** GS.Attr.P bit invalid. */ 200 #define VMX_IGS_GS_ATTR_P_INVALID 49 201 /** GS.Attr.DPL and DS RPL unequal. */ 202 #define VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL 50 203 /** GS.Attr reserved bits not set to 0. */ 204 #define VMX_IGS_GS_ATTR_RESERVED 51 205 /** GS.Attr.G bit invalid. */ 206 #define VMX_IGS_GS_ATTR_G_INVALID 52 207 /** GS.Attr.Type invalid. */ 208 #define VMX_IGS_GS_ATTR_TYPE_INVALID 53 209 /** V86 mode CS.Base invalid. */ 210 #define VMX_IGS_V86_CS_BASE_INVALID 54 211 /** V86 mode CS.Limit invalid. */ 212 #define VMX_IGS_V86_CS_LIMIT_INVALID 55 213 /** V86 mode CS.Attr invalid. */ 214 #define VMX_IGS_V86_CS_ATTR_INVALID 56 215 /** V86 mode SS.Base invalid. */ 216 #define VMX_IGS_V86_SS_BASE_INVALID 57 217 /** V86 mode SS.Limit invalid. */ 218 #define VMX_IGS_V86_SS_LIMIT_INVALID 58 219 /** V86 mode SS.Attr invalid. */ 220 #define VMX_IGS_V86_SS_ATTR_INVALID 59 221 /** V86 mode DS.Base invalid. */ 222 #define VMX_IGS_V86_DS_BASE_INVALID 60 223 /** V86 mode DS.Limit invalid. */ 224 #define VMX_IGS_V86_DS_LIMIT_INVALID 61 225 /** V86 mode DS.Attr invalid. */ 226 #define VMX_IGS_V86_DS_ATTR_INVALID 62 227 /** V86 mode ES.Base invalid. */ 228 #define VMX_IGS_V86_ES_BASE_INVALID 63 229 /** V86 mode ES.Limit invalid. */ 230 #define VMX_IGS_V86_ES_LIMIT_INVALID 64 231 /** V86 mode ES.Attr invalid. */ 232 #define VMX_IGS_V86_ES_ATTR_INVALID 65 233 /** V86 mode FS.Base invalid. 
*/ 234 #define VMX_IGS_V86_FS_BASE_INVALID 66 235 /** V86 mode FS.Limit invalid. */ 236 #define VMX_IGS_V86_FS_LIMIT_INVALID 67 237 /** V86 mode FS.Attr invalid. */ 238 #define VMX_IGS_V86_FS_ATTR_INVALID 68 239 /** V86 mode GS.Base invalid. */ 240 #define VMX_IGS_V86_GS_BASE_INVALID 69 241 /** V86 mode GS.Limit invalid. */ 242 #define VMX_IGS_V86_GS_LIMIT_INVALID 70 243 /** V86 mode GS.Attr invalid. */ 244 #define VMX_IGS_V86_GS_ATTR_INVALID 71 245 /** Longmode CS.Base invalid. */ 246 #define VMX_IGS_LONGMODE_CS_BASE_INVALID 72 247 /** Longmode SS.Base invalid. */ 248 #define VMX_IGS_LONGMODE_SS_BASE_INVALID 73 249 /** Longmode DS.Base invalid. */ 250 #define VMX_IGS_LONGMODE_DS_BASE_INVALID 74 251 /** Longmode ES.Base invalid. */ 252 #define VMX_IGS_LONGMODE_ES_BASE_INVALID 75 253 /** SYSENTER ESP is not canonical. */ 254 #define VMX_IGS_SYSENTER_ESP_NOT_CANONICAL 76 255 /** SYSENTER EIP is not canonical. */ 256 #define VMX_IGS_SYSENTER_EIP_NOT_CANONICAL 77 257 /** PAT MSR invalid. */ 258 #define VMX_IGS_PAT_MSR_INVALID 78 259 /** PAT MSR reserved bits not set to 0. */ 260 #define VMX_IGS_PAT_MSR_RESERVED 79 261 /** GDTR.Base is not canonical. */ 262 #define VMX_IGS_GDTR_BASE_NOT_CANONICAL 80 263 /** IDTR.Base is not canonical. */ 264 #define VMX_IGS_IDTR_BASE_NOT_CANONICAL 81 265 /** GDTR.Limit invalid. */ 266 #define VMX_IGS_GDTR_LIMIT_INVALID 82 267 /** IDTR.Limit invalid. */ 268 #define VMX_IGS_IDTR_LIMIT_INVALID 83 269 /** Longmode RIP is invalid. */ 270 #define VMX_IGS_LONGMODE_RIP_INVALID 84 271 /** RFLAGS reserved bits not set to 0. */ 272 #define VMX_IGS_RFLAGS_RESERVED 85 273 /** RFLAGS RA1 reserved bits not set to 1. */ 274 #define VMX_IGS_RFLAGS_RESERVED1 86 275 /** RFLAGS.VM (V86 mode) invalid. */ 276 #define VMX_IGS_RFLAGS_VM_INVALID 87 277 /** RFLAGS.IF invalid. */ 278 #define VMX_IGS_RFLAGS_IF_INVALID 88 279 /** Activity state invalid. */ 280 #define VMX_IGS_ACTIVITY_STATE_INVALID 89 281 /** Activity state HLT invalid when SS.Attr.DPL is not zero. 
*/ 282 #define VMX_IGS_ACTIVITY_STATE_HLT_INVALID 90 283 /** Activity state ACTIVE invalid when block-by-STI or MOV SS. */ 284 #define VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID 91 285 /** Activity state SIPI WAIT invalid. */ 286 #define VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID 92 287 /** Interruptibility state reserved bits not set to 0. */ 288 #define VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED 93 289 /** Interruptibility state cannot be block-by-STI -and- MOV SS. */ 290 #define VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID 94 291 /** Interruptibility state block-by-STI invalid for EFLAGS. */ 292 #define VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID 95 293 /** Interruptibility state invalid while trying to deliver external 294 * interrupt. */ 295 #define VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID 96 296 /** Interruptibility state block-by-MOVSS invalid while trying to deliver an 297 * NMI. */ 298 #define VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID 97 299 /** Interruptibility state block-by-SMI invalid when CPU is not in SMM. */ 300 #define VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID 98 301 /** Interruptibility state block-by-SMI invalid when trying to enter SMM. */ 302 #define VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID 99 303 /** Interruptibility state block-by-STI (maybe) invalid when trying to deliver 304 * an NMI. */ 305 #define VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID 100 306 /** Interruptibility state block-by-NMI invalid when virtual-NMIs control is 307 * active. */ 308 #define VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID 101 309 /** Pending debug exceptions reserved bits not set to 0. */ 310 #define VMX_IGS_PENDING_DEBUG_RESERVED 102 311 /** Longmode pending debug exceptions reserved bits not set to 0. */ 312 #define VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED 103 313 /** Pending debug exceptions.BS bit is not set when it should be. 
*/ 314 #define VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET 104 315 /** Pending debug exceptions.BS bit is not clear when it should be. */ 316 #define VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR 105 317 /** VMCS link pointer reserved bits not set to 0. */ 318 #define VMX_IGS_VMCS_LINK_PTR_RESERVED 106 319 /** @} */ 95 320 96 321 /** @name VMX VMCS-Read cache indices. … … 1174 1399 */ 1175 1400 #define VMX_ENTRY_INTERRUPTION_INFO_VALID(a) (a & RT_BIT(31)) 1401 #define VMX_ENTRY_INTERRUPTION_INFO_TYPE_SHIFT 8 1402 #define VMX_ENTRY_INTERRUPTION_INFO_TYPE(a) ((a >> VMX_ENTRY_INTERRUPTION_INFO_TYPE_SHIFT) & 7) 1176 1403 /** @} */ 1177 1404 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r47619 r47635 39 39 #define HMVMX_SAVE_FULL_GUEST_STATE 40 40 #define HMVMX_SYNC_FULL_GUEST_STATE 41 #define HMVMX_ALWAYS_CHECK_GUEST_STATE 41 42 #define HMVMX_ALWAYS_TRAP_ALL_XCPTS 42 43 #define HMVMX_ALWAYS_TRAP_PF … … 354 355 /** @} */ 355 356 356 static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);357 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);358 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);359 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);360 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);361 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);362 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);363 357 static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 358 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 359 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 360 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 361 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 362 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 363 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient); 364 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 364 365 365 366 /******************************************************************************* … … 1877 1878 pVCpu->hm.s.vmx.u32ProcCtls2 = val; 1878 1879 } 1880 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest)) 1881 { 1882 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when 
secondary processor-based VM-execution controls not " 1883 "available\n")); 1884 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 1885 } 1879 1886 1880 1887 return VINF_SUCCESS; … … 5974 5981 5975 5982 /** 5976 * Does the necessary state syncing before doing a longjmp to ring-3. 5983 * Does the necessary state syncing before returning to ring-3 for any reason 5984 * (longjmp, preemption, voluntary exits to ring-3) from VT-x. 5977 5985 * 5978 5986 * @param pVM Pointer to the VM. … … 5981 5989 * out-of-sync. Make sure to update the required fields 5982 5990 * before using them. 5983 * @param rcExit The reason for exiting to ring-3. Can be5984 * VINF_VMM_UNKNOWN_RING3_CALL.5985 5991 * 5986 5992 * @remarks No-long-jmp zone!!! 5987 5993 */ 5988 static void hmR0VmxL ongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)5994 static void hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5989 5995 { 5990 5996 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 5991 5997 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 5992 5998 5993 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 5994 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL); 5995 AssertRC(rc); 5999 /* Save the guest state if necessary. */ 6000 if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL) 6001 { 6002 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 6003 AssertRC(rc); 6004 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL); 6005 } 5996 6006 5997 6007 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ … … 6031 6041 6032 6042 /** 6043 * Does the necessary state syncing before doing a longjmp to ring-3. 6044 * 6045 * @param pVM Pointer to the VM. 6046 * @param pVCpu Pointer to the VMCPU. 6047 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 6048 * out-of-sync. Make sure to update the required fields 6049 * before using them. 6050 * 6051 * @remarks No-long-jmp zone!!! 
6052 */ 6053 DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6054 { 6055 hmR0VmxLeave(pVM, pVCpu, pMixedCtx); 6056 } 6057 6058 6059 /** 6033 6060 * An action requires us to go back to ring-3. This function does the necessary 6034 6061 * steps before we can safely return to ring-3. This is not the same as longjmps 6035 * to ring-3, this is voluntary. 6062 * to ring-3, this is voluntary and prepares the guest so it may continue 6063 * executing outside HM (recompiler/IEM). 6036 6064 * 6037 6065 * @param pVM Pointer to the VM. … … 6075 6103 } 6076 6104 6077 /* S ync. the guest state. */6078 hmR0VmxL ongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);6105 /* Save guest state and restore host state bits. */ 6106 hmR0VmxLeave(pVM, pVCpu, pMixedCtx); 6079 6107 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3); 6080 6108 6109 /* Sync recompiler state. */ 6081 6110 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3); 6082 6111 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR … … 6086 6115 | CPUM_CHANGED_TR 6087 6116 | CPUM_CHANGED_HIDDEN_SEL_REGS); 6117 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0); 6118 if ( pVM->hm.s.fNestedPaging 6119 && CPUMIsGuestPagingEnabledEx(pMixedCtx)) 6120 { 6121 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); 6122 } 6088 6123 6089 6124 /* On our way back from ring-3 the following needs to be done. */ … … 6122 6157 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 6123 6158 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu)); 6124 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser , VINF_VMM_UNKNOWN_RING3_CALL);6159 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser); 6125 6160 VMMRZCallRing3Enable(pVCpu); 6126 6161 } … … 6283 6318 /* 6284 6319 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, 6285 * VMX_EXIT_MTF VMX_EXIT_APIC_WRITE, VMX_EXIT_VIRTUALIZED_EOI. 
See Intel spec. 27.3.4 "Saving Non-Register State". 6320 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. 6321 * See Intel spec. 27.3.4 "Saving Non-Register State". 6286 6322 */ 6287 6323 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); … … 7018 7054 AssertRC(rc); 7019 7055 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags)); 7056 7057 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE 7058 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx); 7059 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND) 7060 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason)); 7061 #endif 7020 7062 7021 7063 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */ … … 7370 7412 7371 7413 7414 /** 7415 * Tries to determine what part of the guest-state VT-x has deemed as invalid 7416 * and update error record fields accordingly. 7417 * 7418 * @return VMX_IGS_* return codes. 7419 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything 7420 * wrong with the guest state. 7421 * 7422 * @param pVM Pointer to the VM. 7423 * @param pVCpu Pointer to the VMCPU. 7424 * @param pCtx Pointer to the guest-CPU state. 7425 */ 7426 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 7427 { 7428 #define HMVMX_ERROR_BREAK(err) { uError = (err); break; } 7429 #define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \ 7430 uError = (err); \ 7431 break; \ 7432 } else do {} while (0) 7433 /* Duplicate of IEM_IS_CANONICAL(). */ 7434 #define HMVMX_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000)) 7435 7436 int rc; 7437 uint64_t u64Val; 7438 uint32_t u32Val; 7439 uint32_t uError = VMX_IGS_ERROR; 7440 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest; 7441 7442 do 7443 { 7444 /* 7445 * CR0. 
7446 */ 7447 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1); 7448 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1); 7449 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). 7450 See Intel spec. 26.3.1 "Checks on guest Guest Control Registers, Debug Registers and MSRs." */ 7451 if (fUnrestrictedGuest) 7452 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 7453 7454 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 7455 AssertRCBreak(rc); 7456 HMVMX_CHECK_BREAK((u32Val & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1); 7457 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR0), VMX_IGS_CR0_FIXED0); 7458 if ( !fUnrestrictedGuest 7459 && CPUMIsGuestPagingEnabledEx(pCtx) 7460 && !CPUMIsGuestInProtectedMode(pVCpu)) 7461 { 7462 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO); 7463 } 7464 7465 /* 7466 * CR4. 7467 */ 7468 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1); 7469 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1); 7470 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 7471 AssertRCBreak(rc); 7472 HMVMX_CHECK_BREAK((u32Val & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1); 7473 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR4), VMX_IGS_CR4_FIXED0); 7474 7475 /* 7476 * IA32_DEBUGCTL MSR. 7477 */ 7478 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val); 7479 AssertRCBreak(rc); 7480 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG) 7481 && (u64Val & 0xfffffe3c)) /* Bits 31-9, bits 2-5 MBZ. */ 7482 { 7483 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED); 7484 } 7485 uint64_t u64DebugCtlMsr = u64Val; 7486 7487 /* 7488 * 64-bit checks. 
7489 */ 7490 if (HMVMX_IS_64BIT_HOST_MODE()) 7491 { 7492 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST) 7493 && !fUnrestrictedGuest) 7494 { 7495 HMVMX_CHECK_BREAK(CPUMIsGuestPagingEnabledEx(pCtx), VMX_IGS_CR0_PG_LONGMODE); 7496 HMVMX_CHECK_BREAK((pCtx->cr4 & X86_CR4_PAE), VMX_IGS_CR4_PAE_LONGMODE); 7497 } 7498 7499 if ( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST) 7500 && (pCtx->cr4 & X86_CR4_PCIDE)) 7501 { 7502 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE); 7503 } 7504 7505 /** @todo CR3 field must be such that bits 63:52 and bits in the range 7506 * 51:32 beyond the processor's physical-address width are 0. */ 7507 7508 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG) 7509 && (pCtx->dr[7] & X86_DR7_MBZ_MASK)) 7510 { 7511 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED); 7512 } 7513 7514 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val); 7515 AssertRCBreak(rc); 7516 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL); 7517 7518 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val); 7519 AssertRCBreak(rc); 7520 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL); 7521 } 7522 7523 /* 7524 * PERF_GLOBAL MSR. 7525 */ 7526 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR) 7527 { 7528 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val); 7529 AssertRCBreak(rc); 7530 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)), 7531 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63-35, bits 31-2 MBZ. */ 7532 } 7533 7534 /* 7535 * PAT MSR. 
7536 */ 7537 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR) 7538 { 7539 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val); 7540 AssertRCBreak(rc); 7541 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED); 7542 for (unsigned i = 0; i < 8; i++) 7543 { 7544 uint8_t u8Val = (u64Val & 0x7); 7545 if ( u8Val != 0 /* UC */ 7546 || u8Val != 1 /* WC */ 7547 || u8Val != 4 /* WT */ 7548 || u8Val != 5 /* WP */ 7549 || u8Val != 6 /* WB */ 7550 || u8Val != 7 /* UC- */) 7551 { 7552 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID); 7553 } 7554 u64Val >>= 3; 7555 } 7556 } 7557 7558 /* 7559 * EFER MSR. 7560 */ 7561 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR) 7562 { 7563 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val); 7564 AssertRCBreak(rc); 7565 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)), 7566 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63-12, bit 9, bits 7-1 MBZ. */ 7567 HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST), 7568 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); 7569 HMVMX_CHECK_BREAK( fUnrestrictedGuest 7570 || (u64Val & MSR_K6_EFER_LMA) == (pCtx->cr0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH); 7571 } 7572 7573 /* 7574 * Segment registers. 
7575 */ 7576 if ( !pVM->hm.s.vmx.fUnrestrictedGuest 7577 && ( !CPUMIsGuestInRealModeEx(pCtx) 7578 && !CPUMIsGuestInV86ModeEx(pCtx))) 7579 { 7580 /* Protected mode checks */ 7581 /* CS */ 7582 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID); 7583 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED); 7584 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED); 7585 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff 7586 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 7587 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000) 7588 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID); 7589 /* CS cannot be loaded with NULL in protected mode. */ 7590 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE); 7591 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11) 7592 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL); 7593 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15) 7594 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH); 7595 else 7596 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID); 7597 /* SS */ 7598 HMVMX_CHECK_BREAK((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL); 7599 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL); 7600 if ( !(pCtx->cr0 & X86_CR0_PE) 7601 || pCtx->cs.Attr.n.u4Type == 3) 7602 { 7603 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID); 7604 } 7605 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE)) 7606 { 7607 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID); 7608 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID); 7609 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), 
VMX_IGS_SS_ATTR_RESERVED); 7610 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED); 7611 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff 7612 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 7613 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000) 7614 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID); 7615 } 7616 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */ 7617 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE)) 7618 { 7619 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID); 7620 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID); 7621 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), 7622 VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL); 7623 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED); 7624 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED); 7625 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff 7626 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 7627 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000) 7628 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID); 7629 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE) 7630 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID); 7631 } 7632 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE)) 7633 { 7634 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID); 7635 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID); 7636 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), 7637 VMX_IGS_ES_ATTR_DPL_RPL_UNEQUAL); 7638 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED); 7639 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), 
VMX_IGS_ES_ATTR_RESERVED); 7640 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff 7641 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 7642 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000) 7643 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID); 7644 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE) 7645 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID); 7646 } 7647 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE)) 7648 { 7649 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID); 7650 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID); 7651 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), 7652 VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL); 7653 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED); 7654 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED); 7655 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff 7656 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 7657 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000) 7658 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID); 7659 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 7660 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID); 7661 } 7662 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE)) 7663 { 7664 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID); 7665 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID); 7666 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), 7667 VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL); 7668 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED); 7669 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED); 7670 
HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff 7671 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 7672 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000) 7673 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID); 7674 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE) 7675 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID); 7676 } 7677 /* 64-bit capable CPUs. */ 7678 if (HMVMX_IS_64BIT_HOST_MODE()) 7679 { 7680 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID); 7681 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32), VMX_IGS_LONGMODE_SS_BASE_INVALID); 7682 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32), VMX_IGS_LONGMODE_DS_BASE_INVALID); 7683 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32), VMX_IGS_LONGMODE_ES_BASE_INVALID); 7684 } 7685 } 7686 else if ( CPUMIsGuestInV86ModeEx(pCtx) 7687 || ( CPUMIsGuestInRealModeEx(pCtx) 7688 && !pVM->hm.s.vmx.fUnrestrictedGuest)) 7689 { 7690 /* Real and v86 mode checks. 
*/ 7691 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr; 7692 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 7693 { 7694 u32CSAttr = 0xf3; u32SSAttr = 0xf3; 7695 u32DSAttr = 0xf3; u32ESAttr = 0xf3; 7696 u32FSAttr = 0xf3; u32GSAttr = 0xf3; 7697 } 7698 else 7699 { 7700 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; 7701 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u; 7702 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u; 7703 } 7704 7705 /* CS */ 7706 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID); 7707 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID); 7708 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID); 7709 /* SS */ 7710 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID); 7711 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID); 7712 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID); 7713 /* DS */ 7714 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID); 7715 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID); 7716 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID); 7717 /* ES */ 7718 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID); 7719 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID); 7720 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID); 7721 /* FS */ 7722 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID); 7723 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID); 7724 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID); 7725 /* GS */ 7726 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID); 7727 
HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID); 7728 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID); 7729 /* 64-bit capable CPUs. */ 7730 if (HMVMX_IS_64BIT_HOST_MODE()) 7731 { 7732 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID); 7733 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32), VMX_IGS_LONGMODE_SS_BASE_INVALID); 7734 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32), VMX_IGS_LONGMODE_DS_BASE_INVALID); 7735 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32), VMX_IGS_LONGMODE_ES_BASE_INVALID); 7736 } 7737 } 7738 7739 /* 7740 * GDTR and IDTR. 7741 */ 7742 if (HMVMX_IS_64BIT_HOST_MODE()) 7743 { 7744 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 7745 AssertRCBreak(rc); 7746 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL); 7747 7748 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 7749 AssertRCBreak(rc); 7750 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL); 7751 } 7752 7753 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 7754 AssertRCBreak(rc); 7755 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 7756 7757 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 7758 AssertRCBreak(rc); 7759 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */ 7760 7761 /* 7762 * RIP and RFLAGS. 7763 */ 7764 uint32_t u32EFlags; 7765 if (HMVMX_IS_64BIT_HOST_MODE()) 7766 { 7767 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val); 7768 AssertRCBreak(rc); 7769 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). 
*/ 7770 if ( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST) 7771 || !pCtx->cs.Attr.n.u1Long ) 7772 { 7773 HMVMX_CHECK_BREAK(!(u64Val & 0xffffffff00000000), VMX_IGS_LONGMODE_RIP_INVALID); 7774 } 7775 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N 7776 * must be identical if the "IA32e mode guest" VM-entry control is 1 7777 * and CS.L is 1. No check applies if the CPU supports 64 7778 * linear-address bits. */ 7779 7780 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val); 7781 AssertRCBreak(rc); 7782 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */ 7783 HMVMX_CHECK_BREAK(!(u64Val & (X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), VMX_IGS_RFLAGS_RESERVED); 7784 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); 7785 u32EFlags = u64Val; 7786 } 7787 else 7788 { 7789 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32EFlags); 7790 AssertRCBreak(rc); 7791 HMVMX_CHECK_BREAK(!(u32EFlags & (X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK)), VMX_IGS_RFLAGS_RESERVED); 7792 HMVMX_CHECK_BREAK((u32EFlags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); 7793 } 7794 7795 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST) 7796 || !(pCtx->cr0 & X86_CR0_PE)) 7797 { 7798 HMVMX_CHECK_BREAK(!(u32EFlags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID); 7799 } 7800 7801 uint32_t u32EntryInfo; 7802 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo); 7803 AssertRCBreak(rc); 7804 if ( VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo) 7805 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT) 7806 { 7807 HMVMX_CHECK_BREAK(u32Val & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID); 7808 } 7809 7810 /* 7811 * Guest Non-Register State. 7812 */ 7813 /* Activity State. 
*/ 7814 uint32_t u32ActivityState; 7815 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState); 7816 AssertRCBreak(rc); 7817 HMVMX_CHECK_BREAK( !u32ActivityState 7818 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)), 7819 VMX_IGS_ACTIVITY_STATE_INVALID); 7820 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl) 7821 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID); 7822 uint32_t u32IntrState; 7823 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState); 7824 AssertRCBreak(rc); 7825 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 7826 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI) 7827 { 7828 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID); 7829 } 7830 7831 /** @todo Activity state and injecting interrupts. Left as a todo since we 7832 * currently don't use activity states but ACTIVE. */ 7833 7834 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM) 7835 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID); 7836 7837 /* Guest interruptibility-state. 
*/ 7838 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED); 7839 HMVMX_CHECK_BREAK( (u32IntrState 7840 & (VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)) 7841 != (VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS), 7842 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID); 7843 HMVMX_CHECK_BREAK( (u32EFlags & X86_EFL_IF) 7844 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI), 7845 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID); 7846 if (VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)) 7847 { 7848 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT) 7849 { 7850 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI) 7851 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS), 7852 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID); 7853 } 7854 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI) 7855 { 7856 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS), 7857 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID); 7858 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI), 7859 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID); 7860 } 7861 } 7862 /** @todo Assumes the processor is not in SMM. 
*/ 7863 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI), 7864 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID); 7865 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM) 7866 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI), 7867 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID); 7868 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI) 7869 && VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo) 7870 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI) 7871 { 7872 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI), 7873 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID); 7874 } 7875 7876 /* Pending debug exceptions. */ 7877 if (HMVMX_IS_64BIT_HOST_MODE()) 7878 { 7879 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val); 7880 AssertRCBreak(rc); 7881 /* Bits 63-15, Bit 13, Bits 11-4 MBZ. */ 7882 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED); 7883 u32Val = u64Val; /* For pending debug exceptions checks below. */ 7884 } 7885 else 7886 { 7887 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val); 7888 AssertRCBreak(rc); 7889 /* Bits 31-15, Bit 13, Bits 11-4 MBZ. */ 7890 HMVMX_CHECK_BREAK(!(u64Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED); 7891 } 7892 7893 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI) 7894 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS) 7895 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT) 7896 { 7897 if ( (u32EFlags & X86_EFL_TF) 7898 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */ 7899 { 7900 /* Bit 14 is PendingDebug.BS. */ 7901 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET); 7902 } 7903 if ( !(u32EFlags & X86_EFL_TF) 7904 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. 
*/ 7905 { 7906 /* Bit 14 is PendingDebug.BS. */ 7907 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR); 7908 } 7909 } 7910 7911 /* VMCS link pointer. */ 7912 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val); 7913 AssertRCBreak(rc); 7914 if (u64Val != UINT64_C(0xffffffffffffffff)) 7915 { 7916 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED); 7917 /** @todo Bits beyond the processor's physical-address width MBZ. */ 7918 /** @todo 32-bit located in memory referenced by value of this field (as a 7919 * physical address) must contain the processor's VMCS revision ID. */ 7920 /** @todo SMM checks. */ 7921 } 7922 7923 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries. */ 7924 7925 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */ 7926 if (uError == VMX_IGS_ERROR) 7927 uError == VMX_IGS_REASON_NOT_FOUND; 7928 } while (0); 7929 7930 pVCpu->hm.s.u32HMError = uError; 7931 return uError; 7932 7933 #undef HMVMX_ERROR_BREAK 7934 #undef HMVMX_CHECK_BREAK 7935 #undef HMVMX_IS_CANONICAL 7936 } 7937 7372 7938 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 7373 7939 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */ … … 7968 8534 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 7969 8535 AssertRCReturn(rc, rc); 8536 8537 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx); 8538 NOREF(uInvalidReason); 7970 8539 7971 8540 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo)); -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r47584 r47635 2983 2983 break; 2984 2984 2985 case VERR_VMX_INVALID_GUEST_STATE: 2985 2986 case VERR_VMX_UNEXPECTED_EXIT_CODE: 2986 2987 case VERR_SVM_UNKNOWN_EXIT: … … 2988 2989 case VERR_SVM_UNEXPECTED_PATCH_TYPE: 2989 2990 case VERR_SVM_UNEXPECTED_XCPT_EXIT: 2990 LogRel(("HM: CPU%d HM error %#x \n", i, pVM->aCpus[i].hm.s.u32HMError));2991 LogRel(("HM: CPU%d HM error %#x (%d)\n", i, pVM->aCpus[i].hm.s.u32HMError, pVM->aCpus[i].hm.s.u32HMError)); 2991 2992 break; 2992 2993 }
Note:
See TracChangeset
for help on using the changeset viewer.