Changeset 72963 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jul 8, 2018 5:56:09 AM (6 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72911 → r72963

Unless noted otherwise, a hunk only re-aligns or re-wraps the `|` continuation lines of a multi-line mask; the statements themselves are unchanged. Line numbers refer to the r72963 revision.

Lines 989–999 (alignment only), exception intercepts under HMSVM_ALWAYS_TRAP_ALL_XCPTS:
    /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */
    pVmcbCtrl->u32InterceptXcpt |= 0 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_NM)
                                     | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS)
                                     | RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_PF) | RT_BIT(X86_XCPT_MF);

Lines 1019–1020 (alignment only):
    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE | SVM_CTRL_INTERCEPT_VMLOAD;

Lines 1027–1028 (alignment only):
    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI | SVM_CTRL_INTERCEPT_STGI;

Lines 1066–1067 (alignment only):
    /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
    pVmcbCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG | SVM_CTRL_INTERCEPT_TASK_SWITCH;

Lines 1559–1560 (alignment only):
    uShadowCr0 |= X86_CR0_PG   /* Use shadow page tables. */
                | X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF #VMEXIT. */

Lines 2288–2290 (alignment only):
    pVmcbNstGstCtrl->u64InterceptCtrl |= (pVmcb->ctrl.u64InterceptCtrl & ~(  SVM_CTRL_INTERCEPT_VINTR
                                                                           | SVM_CTRL_INTERCEPT_VMMCALL))
                                       | HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS;

Lines 2656–2657 (alignment only):
    if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVirtVmsaveVmload)
        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VMSAVE | SVM_CTRL_INTERCEPT_VMLOAD;

Lines 2664–2665 (alignment only):
    if (!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fSvmVGif)
        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_CLGI | SVM_CTRL_INTERCEPT_STGI;

Lines 3287–3292 (re-wrapped; the first flag now follows the call on the same line):
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR
                             | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

Lines 3761–3764 (re-wrapped):
    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT | CPUMCTX_EXTRN_RFLAGS
                              | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);

Lines 3883–3885 (re-wrapped):
    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);

Lines 4813–4821 (re-wrapped):
    hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX
                                 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_HWVIRT
                                 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ
                                 | HMSVM_CPUMCTX_SHARED_STATE);

Line 6287 (previously three lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_SS);

Line 6359 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS);

Line 6383 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS);

Line 6844 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);

Line 6868 (previously three lines; folded, with CPUMCTX_EXTRN_CS now listed before CPUMCTX_EXTRN_RIP):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

Line 7137 (previously two lines; folded, with CPUMCTX_EXTRN_CS now listed before CPUMCTX_EXTRN_RIP):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);

Line 7723 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT);

Line 7746 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_HWVIRT);

Lines 7770–7773 (previously one flag per line, now several per line):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
                                    | CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR
                                    | CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS
                                    | CPUMCTX_EXTRN_SYSENTER_MSRS);

Lines 7786–7788 (the old comment "/* We skip flagging changes made to LSTAR, STAR, SFMASK and other MSRs as they are always re-loaded. */" is dropped, and the three individual HM_CHANGED_GUEST_SYSENTER_*_MSR flags are replaced by the combined HM_CHANGED_GUEST_SYSENTER_MSR_MASK):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS | HM_CHANGED_GUEST_GS
                                             | HM_CHANGED_GUEST_TR | HM_CHANGED_GUEST_LDTR
                                             | HM_CHANGED_GUEST_SYSENTER_MSR_MASK);

Line 7835 (previously two lines, folded into one):
    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
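The hunk at lines 7786–7788 folds the three SYSENTER dirty-state flags that the old code OR'd in individually into one combined flag. A minimal sketch of how such a combined mask would group those bits; the actual definition lives in VirtualBox's HM headers and is not part of this changeset, so treat the grouping below as an assumption:

    /* Assumed grouping: the combined mask stands in for the three individual
       SYSENTER dirty-state flags that the pre-r72963 code OR'd in one by one.
       The real definition is in the HM headers, not in this diff. */
    #define HM_CHANGED_GUEST_SYSENTER_MSR_MASK  (  HM_CHANGED_GUEST_SYSENTER_CS_MSR  \
                                                 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
                                                 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR )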
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72929 → r72963

As in HMSVMR0.cpp, unless noted otherwise a hunk only re-aligns or re-wraps the `|` continuation lines of a multi-line mask; the statements themselves are unchanged. Line numbers refer to the r72963 revision.

Lines 2309–2310 (alignment only):
    fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT   /* External interrupts cause a VM-exit. */
          | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;      /* Non-maskable interrupts (NMIs) cause a VM-exit. */

Lines 2461–2467 (alignment only):
    fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT              /* HLT causes a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING    /* Use TSC-offsetting. */
          | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT           /* MOV DRx causes a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT        /* All IO instructions cause a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT            /* RDPMC causes a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT          /* MONITOR causes a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;           /* MWAIT causes a VM-exit. */

Lines 2482–2484 (alignment only):
    fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
          | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
          | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;

Lines 2510–2511 (alignment only):
    fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT   /* CR8 reads cause a VM-exit. */
          | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;   /* CR8 writes cause a VM-exit. */

Lines 3350–3351 (alignment only):
    fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
          | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;

Lines 3721–3722 (alignment only):
    /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
    uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;

Lines 3769–3780 (alignment only), exception bitmap under HMVMX_ALWAYS_TRAP_ALL_XCPTS:
    uXcptBitmap |= 0 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_TS)
                     | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP)
                     | RT_BIT(X86_XCPT_PF) | RT_BIT(X86_XCPT_MF);

Lines 3892–3893 (re-wrapped; the first flag now follows the |= on the same line):
    /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
    pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
                                | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);

Lines 4972–4975 (re-wrapped; the first flag now follows the call on the same line):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS
                                             | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_HOST_CONTEXT);

Line 6896 (previously two lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);

Lines 7063–7068 (re-wrapped; the first flag now follows the call on the same line):
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR
                             | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

Lines 7754–7755 (previously five lines, folded into two):
    /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
    int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_RIP
                                           | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);

Lines 7819–7821 (previously five lines, folded into three):
    /* If any other guest-state bits are changed here, make sure to update
       hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
                                             | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                             | HM_CHANGED_GUEST_RSP);

Line 9199 (previously three lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);

Line 9824 (previously two lines; folded, with CPUMCTX_EXTRN_CS now listed before CPUMCTX_EXTRN_RIP):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);

Line 9971 (previously two lines; folded, with CPUMCTX_EXTRN_CS now listed before CPUMCTX_EXTRN_RIP):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);

Line 10370 (previously two lines, folded into one):
    rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

Line 11331 (previously two lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);

Line 11360 (previously four lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);

Lines 11389–11390 (previously six lines, folded into two):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS
                                          | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);

Line 11457 (previously three lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);

Line 11480 (previously three lines, folded into one):
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);

Line 11665 (previously two lines, folded into one):
    rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);

Lines 11917–11918 (previously three lines, folded into two; see the @bugref{7368} comment above this statement):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS
                                             | HM_CHANGED_VMX_EXIT_CTLS);

Line 12213 (previously three lines, folded into one):
    rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
    /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */

Lines 12566–12567 (previously four lines, folded into two):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                             | HM_CHANGED_GUEST_APIC_TPR);

Line 12639 (previously two lines, folded into one):
    rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);

Lines 12729–12730 (previously four lines, folded into two):
    /* Successfully handled MMIO operation. */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                             | HM_CHANGED_GUEST_APIC_TPR);

Line 12815 (previously three lines, folded into one):
    /* Successfully synced our nested page tables. */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);

Line 13134 (previously three lines, folded into one):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);

Line 13188 (previously three lines, folded into one):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);

Lines 13233–13234 (previously four lines; folded, with HM_CHANGED_GUEST_CS moved to the end of the list):
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                             | HM_CHANGED_GUEST_CS);

Line 13320 (previously two lines, folded into one; under #ifdef DEBUG_ramshankar):
    rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
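The hunk at lines 3892–3893 composes the EPT pointer from a memory-type field and a page-walk-length field. A minimal sketch of that composition using the names from the diff; the HCPhysPml4 variable and the 4K-alignment mask are illustrative assumptions, not code from this changeset:

    /* Sketch only: build an EPT pointer from an assumed 4K-aligned PML4 table address,
       the write-back memory type, and the default page-walk length. */
    uint64_t HCPhysEPTP = HCPhysPml4 & ~(uint64_t)0xfff;   /* assumed: physical address of the PML4 table */
    HCPhysEPTP |= VMX_EPT_MEMTYPE_WB                                                     /* paging-structure memory type */
                | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);  /* page-walk length field */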