Changeset 74468 in vbox for trunk/src/VBox/VMM/VMMAll

Timestamp:
    Sep 26, 2018 4:10:23 AM
File:
    1 edited

Legend:
    Unmodified lines carry no prefix; lines added in r74468 are prefixed
    with '+'; lines removed from r74450 are prefixed with '-'. Hunk headers
    give the starting line number in each revision.
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r74450 → r74468)

@@ r74450:113 r74468:113 @@
     /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
     {
-        /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
+        /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
         /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
         /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,

@@ r74450:231 r74468:231 @@
     /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
     {
-        /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
-        /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
-        /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
-        /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
-        /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
-        /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
+        /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoExitQual),
+        /* 1 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRcx),
+        /* 2 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRsi),
+        /* 3 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRdi),
+        /* 4 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRip),
+        /* 5 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
         /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
         /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,

@@ r74450:1229 r74468:1229 @@
 
 /**
+ * Sets the VM-exit qualification VMCS field.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   uExitQual   The VM-exit qualification field.
+ */
+DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
+{
+    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    pVmcs->u64RoExitQual.u = uExitQual;
+}
+
+
+/**
+ * Sets the VM-exit instruction length VMCS field.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The VM-exit instruction length (in bytes).
+ */
+DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
+{
+    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    pVmcs->u32RoExitInstrLen = cbInstr;
+}
+
+
+/**
+ * Sets the VM-exit instruction info. VMCS field.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   uExitInstrInfo  The VM-exit instruction info. field.
+ */
+DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
+{
+    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
+}
+
+
+/**
  * Implements VMSucceed for VMX instruction success.
  *

@@ r74450:2475 r74468:2514 @@
 IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
 {
-    /* Assert that we are not called multiple times during VM-entry. */
+    /* We shouldn't be called multiple times during VM-entry. */
     Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
+
+    /* MTF should not be set outside VMX non-root mode. */
+    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_MTF));
 
     /*
      * Preserve the required force-flags.
      *
-     * We only preserve the force-flags that would affect the execution of the
-     * nested-guest (or the guest).
+     * We cache and clear force-flags that would affect the execution of the
+     * nested-guest. Cached flags are then restored while returning to the guest
+     * if necessary.
      *
-     * - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as VM-exit explicitly
-     *   clears interrupt-inhibition and on VM-entry the guest-interruptibility
-     *   state provides the inhibition if any.
+     * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
+     *   interrupts until the completion of the current VMLAUNCH/VMRESUME
+     *   instruction. Interrupt inhibition for any nested-guest instruction
+     *   will be set later while loading the guest-interruptibility state.
      *
-     * - VMCPU_FF_BLOCK_NMIS needs not be preserved as VM-entry does not discard
-     *   any NMI blocking. VM-exits caused directly by NMIs (intercepted by the
-     *   exception bitmap) do block subsequent NMIs.
+     * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
+     *   successful VM-entry needs to continue blocking NMIs if it was in effect
+     *   during VM-entry.
      *
      * - MTF need not be preserved as it's used only in VMX non-root mode and
      *   is supplied on VM-entry through the VM-execution controls.
      *
-     * The remaining FFs (e.g. timers) can stay in place so that we will be able to
-     * generate interrupts that should cause #VMEXITs for the nested-guest.
+     * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
+     * we will be able to generate interrupts that may cause VM-exits for
+     * the nested-guest.
      */
-    uint32_t const fDiscardMask = VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_MTF | VMCPU_FF_BLOCK_NMIS;
-    pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & fDiscardMask;
-    VMCPU_FF_CLEAR(pVCpu, fDiscardMask);
+    pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
+
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
 }

@@ r74450:2672 r74468:2718 @@
     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
 
-    /*
-     * Activity-state: VM-exits occur before changing the activity state
-     * of the processor and hence we shouldn't need to change it.
-     */
+    /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
 
     /* Interruptibility-state. */

@@ r74450:2729 r74468:2772 @@
     Assert(pVmcs);
 
-    /*
-     * Save guest control, debug, segment, descriptor-table registers and some MSRs.
-     */
     iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
     iemVmxVmexitSaveGuestSegRegs(pVCpu);

@@ r74450:2737 r74468:2777 @@
     /*
      * Save guest RIP, RSP and RFLAGS.
+     * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
      */
     /* We don't support enclave mode yet. */

@@ r74450:2743 r74468:2784 @@
     pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;    /** @todo NSTVMX: Check RFLAGS.RF handling. */
 
-    /* Save guest non-register state. */
     iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
 }

@@ r74450:2789 r74468:2829 @@
     {
         if (   !pMsr->u32Reserved
-            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
-            && pMsr->u32Msr != MSR_IA32_SMBASE)
+            && pMsr->u32Msr != MSR_IA32_SMBASE
+            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
         {
             VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);

@@ r74450:2836 r74468:2876 @@
  * Performs a VMX abort (due to an fatal error during VM-exit).
  *
- * @returns VBox status code.
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   enmAbort    The VMX abort reason.
  */
-IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
+IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
 {
     /*

@@ r74450:3079 r74468:3119 @@ (whitespace-only change)
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     const char *const pszFailure = "VMX-abort";
-    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
+    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
 
     if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)

@@ r74450:3160 r74468:3200 @@
             && pMsr->u32Msr != MSR_K8_GS_BASE
             && pMsr->u32Msr != MSR_K6_EFER
-            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
-            && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
+            && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
+            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
         {
             VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);

@@ r74450:3199 r74468:3239 @@
  * Loads the host state as part of VM-exit.
  *
- * @returns VBox status code.
+ * @returns Strict VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   uExitReason The VM-exit reason (for logging purposes).
  */
-IEM_STATIC int iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
+IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
 {
     /*

@@ r74450:3220 r74468:3260 @@
     }
 
-    /*
-     * Load host control, debug, segment, descriptor-table registers and some MSRs.
-     */
     iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
     iemVmxVmexitLoadHostSegRegs(pVCpu);

@@ r74450:3244 r74468:3281 @@
     if (rcStrict == VINF_SUCCESS)
     {
-        /* Check host PDPTEs. */
+        /* Check host PDPTEs (only when we've fully switched page tables_. */
         /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
         int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);

@@ r74450:3262 r74468:3299 @@
     {
         Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
-        return rcStrict;
+        return VBOXSTRICTRC_VAL(rcStrict);
     }
 

@@ r74450:3275 r74468:3312 @@
     }
 
-    return VINF_SUCCESS;
+    return rcStrict;
 }
 

@@ r74450:3291 r74468:3328 @@ (whitespace-only change)
     Assert(pVmcs);
 
-    pVmcs->u32RoExitReason = uExitReason;
+    pVmcs->u32RoExitReason = uExitReason;
 
     /** @todo NSTVMX: Update VM-exit instruction length for instruction VM-exits. */

@@ r74450:3313 r74468:3350 @@
     }
 
-    int rc = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    /** @todo NSTVMX: rest of VM-exit. */
+    /*
+     * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
+     * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
+     * pass just the lower bits, till then an assert should suffice.
+     */
+    Assert(!RT_HI_U16(uExitReason));
+
+    VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
+    if (RT_FAILURE(rcStrict))
+        LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
 
     /* We're no longer in nested-guest execution mode. */
     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
 
-    return VINF_SUCCESS;
+    return rcStrict;
 }
 

@@ r74450:3339 r74468:3381 @@
      */
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    const char *const pszFailure = "VM-exit";
     bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
-    const char *const pszFailure = "VM-exit";
 
     /* CR0 reserved bits. */

@@ r74450:3468 r74468:3510 @@
      */
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    const char *const pszFailure = "VM-exit";
     bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
     bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
     bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
-    const char *const pszFailure = "VM-exit";
 
     /* Selectors. */

@@ r74450:3857 r74468:3899 @@
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     const char *const pszFailure = "VM-exit";
+
     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     {

@@ r74450:3902 r74468:3945 @@ (whitespace-only change)
     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     const char *const pszFailure = "VM-exit";
-    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
 
     /* RIP. */

@@ r74450:4077 r74468:4120 @@
     { /* likely */ }
     else
+    {
+        /*
+         * We don't support injecting NMIs when blocking-by-STI would be in effect.
+         * We update the VM-exit qualification only when blocking-by-STI is set
+         * without blocking-by-MovSS being set. Although in practise it does not
+         * make much difference since the order of checks are implementation defined.
+         */
+        if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
+            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
+    }
 
     if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)

@@ r74450:4139 r74468:4192 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
     }

@@ r74450:4148 r74468:4201 @@
         || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
     }

@@ r74450:4158 r74468:4211 @@
     if (RT_FAILURE(rc))
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
     }

@@ r74450:4167 r74468:4220 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
     }

@@ r74450:4177 r74468:4230 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
     }

@@ r74450:4220 r74468:4273 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
         VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);

@@ r74450:4228 r74468:4281 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
+        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
     }

@@ r74450:4281 r74468:4334 @@
 IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
 {
-    /* Check control registers, debug registers and MSRs. */
     int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
     if (RT_SUCCESS(rc))
     {
-        /* Check guest segment registers, LDTR, TR. */
         rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
         if (RT_SUCCESS(rc))
         {
-            /* Check guest GDTR and IDTR. */
             rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
             if (RT_SUCCESS(rc))
             {
-                /* Check guest RIP, RSP and RFLAGS. */
                 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
                 if (RT_SUCCESS(rc))
                 {
-                    /* Check guest non-register state. */
                     rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
                     if (RT_SUCCESS(rc))
-                    {
-                        /* Check guest PDPTEs. */
                         return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
-                    }
                 }
             }

@@ r74450:5083 r74468:5128 @@
     else
     {
-        pVmcs->u64ExitQual.u = VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR);
+        iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
     }

@@ r74450:5100 r74468:5145 @@
             && pMsr->u32Msr != MSR_K8_GS_BASE
             && pMsr->u32Msr != MSR_K6_EFER
-            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
-            && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
+            && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
+            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
         {
             VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);

@@ r74450:5114 r74468:5159 @@
              *    MSR in ring-0 if possible, or come up with a better, generic solution.
              */
-            pVmcs->u64ExitQual.u = idxMsr;
+            iemVmxVmcsSetExitQual(pVCpu, idxMsr);
             VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
                                    ? kVmxVDiag_Vmentry_MsrLoadRing3

@@ r74450:5122 r74468:5167 @@
         else
         {
-            pVmcs->u64ExitQual.u = idxMsr;
+            iemVmxVmcsSetExitQual(pVCpu, idxMsr);
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
         }

@@ r74450:5161 r74468:5206 @@
     Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
 
-    if (pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
+    if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
     {
         /** @todo NSTVMX: Virtual-NMIs doesn't affect NMI blocking in the normal sense.

@@ r74450:5169 r74468:5214 @@
         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     }
-    else if (   pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
-             || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
-    {
+    else
+        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
+
+    if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
         EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
-    }
+    else
+        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
 
     /* SMI blocking is irrelevant. We don't support SMIs yet. */

@@ r74450:5200 r74468:5247 @@
 IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
 {
-    /*
-     * Load guest control, debug, segment, descriptor-table registers and some MSRs.
-     */
     iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
     iemVmxVmentryLoadGuestSegRegs(pVCpu);

@@ r74450:5215 r74468:5259 @@
     pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
 
-    /* Load guest non-register state. */
     iemVmxVmentryLoadGuestNonRegState(pVCpu);
 

@@ r74450:5364 r74468:5407 @@
     }
 
-    /* Check VM-execution control fields. */
     rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
     if (RT_SUCCESS(rc))
     {
-        /* Check VM-exit control fields. */
         rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
         if (RT_SUCCESS(rc))
         {
-            /* Check VM-entry control fields. */
             rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
             if (RT_SUCCESS(rc))
             {
-                /* Check host-state fields. */
                 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
                 if (RT_SUCCESS(rc))
                 {
-                    /* Save the (outer) guest force-flags as VM-exits can occur from this point on. */
+                    /* Save the guest force-flags as VM-exits can occur from this point on. */
                     iemVmxVmentrySaveForceFlags(pVCpu);
 
-                    /* Check guest-state fields. */
                     rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
                     if (RT_SUCCESS(rc))
                     {
-                        /* Load guest-state fields. */
                         rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
                         if (RT_SUCCESS(rc))
                         {
-                            /* Load MSRs from the VM-entry auto-load MSR area. */
                             rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
                             if (RT_SUCCESS(rc))

@@ r74450:5420 r74468:5456 @@
                             pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
 
-                            /* Event injection. */
+                            /* Now that we've switched page tables, we can inject events if any. */
                             iemVmxVmentryInjectEvent(pVCpu, pszInstr);
 

@@ r74450:5428 r74468:5464 @@
                             return VINF_SUCCESS;
                         }
-
                         return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
                     }
                 }
-
                 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
             }

@@ r74450:5492 r74468:5526 @@
     }
 
-    /*
-     * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
-     */
+    /* Record that we're no longer in VMX root operation, block INIT, block and disable A20M. */
     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
     Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
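One change worth a closer look is the switch from equality tests to bit tests on the guest interruptibility-state (the r74450:5163 → r74468:5208 hunk above): the field is a bit mask in which blocking-by-STI, blocking-by-MOV-SS and blocking-by-NMI may be set at the same time, so an '==' comparison silently misses combined states. Below is a minimal, self-contained C sketch of the difference; the macro names and values here are illustrative (they mirror the Intel SDM bit layout of the interruptibility-state field, not the VirtualBox definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bits mirroring the VMCS guest interruptibility-state
   (Intel SDM: bit 0 = STI, bit 1 = MOV SS, bit 3 = NMI blocking).
   These macro names are hypothetical, not the VBox ones. */
#define INT_STATE_BLOCK_STI    UINT32_C(0x00000001)
#define INT_STATE_BLOCK_MOVSS  UINT32_C(0x00000002)
#define INT_STATE_BLOCK_NMI    UINT32_C(0x00000008)

int main(void)
{
    /* A guest can report STI blocking and NMI blocking simultaneously. */
    uint32_t uIntrState = INT_STATE_BLOCK_STI | INT_STATE_BLOCK_NMI;

    /* The pre-r74468 equality test misses the combined state (prints 0)... */
    printf("== sees NMI blocking: %d\n", uIntrState == INT_STATE_BLOCK_NMI);

    /* ...while the bit test used after this changeset catches it (prints 1). */
    printf("&  sees NMI blocking: %d\n", !!(uIntrState & INT_STATE_BLOCK_NMI));
    return 0;
}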