Changeset 72661 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jun 22, 2018 11:36:36 AM
- svn:sync-xref-src-repo-rev: 123172
- File: 1 edited
Legend:
- Unmodified: lines with no prefix
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72655 → r72661:

 
     Log4(("hmR0SvmImportGuestState: fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
-    if (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)
-    {
-        fWhat &= pCtx->fExtrn;
-
+
+    /*
+     * We disable interrupts to make the updating of the state and in particular
+     * the fExtrn modification atomic wrt to preemption hooks.
+     */
+    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
+
+    fWhat &= pCtx->fExtrn;
+    if (fWhat & pCtx->fExtrn)
+    {
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
         if (fWhat & CPUMCTX_EXTRN_HWVIRT)
…
                 pCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
             }
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HWVIRT);
         }
…
                 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
-        }
-#else
-        ASMAtomicUoAndU64(&pCtx->fExtrn, ~(CPUMCTX_EXTRN_HWVIRT | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ));
+        }
 #endif
…
             else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);
         }
 
         if (fWhat & CPUMCTX_EXTRN_RIP)
-        {
             pCtx->rip = pVmcbGuest->u64RIP;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);
-        }
 
         if (fWhat & CPUMCTX_EXTRN_RFLAGS)
-        {
             pCtx->eflags.u32 = pVmcbGuest->u64RFlags;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);
-        }
 
         if (fWhat & CPUMCTX_EXTRN_RSP)
-        {
             pCtx->rsp = pVmcbGuest->u64RSP;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);
-        }
 
         if (fWhat & CPUMCTX_EXTRN_RAX)
-        {
             pCtx->rax = pVmcbGuest->u64RAX;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RAX);
-        }
 
         if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
…
                 }
                 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);
             }
             if (fWhat & CPUMCTX_EXTRN_SS)
…
                 if (pCtx->ss.Attr.n.u2Dpl != uCpl)
                     pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);
             }
             if (fWhat & CPUMCTX_EXTRN_DS)
…
                 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
                 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);
             }
             if (fWhat & CPUMCTX_EXTRN_ES)
…
                 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
                 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);
             }
             if (fWhat & CPUMCTX_EXTRN_FS)
…
                 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
                 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);
             }
             if (fWhat & CPUMCTX_EXTRN_GS)
…
                 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
                 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);
             }
         }
…
                         pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                 }
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);
             }
 
             if (fWhat & CPUMCTX_EXTRN_LDTR)
-            {
                 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);
-            }
 
             if (fWhat & CPUMCTX_EXTRN_GDTR)
…
                 pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
                 pCtx->gdtr.pGdt  = pVmcbGuest->GDTR.u64Base;
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);
             }
 
…
                 pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
                 pCtx->idtr.pIdt  = pVmcbGuest->IDTR.u64Base;
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);
             }
         }
…
             pCtx->msrCSTAR  = pVmcbGuest->u64CSTAR;
             pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);
         }
 
…
             pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
             pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);
         }
 
         if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
-        {
             pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);
-        }
 
         if (fWhat & CPUMCTX_EXTRN_DR_MASK)
…
                 else
                     CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR6);
             }
 
…
                 else
                     Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);
             }
         }
…
                 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
                                     | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP));
+                VMMRZCallRing3Disable(pVCpu); /* CPUM has log statements and calls into PGM. */
                 CPUMSetGuestCR0(pVCpu, uCr0);
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);
+                VMMRZCallRing3Enable(pVCpu);
             }
 
             if (fWhat & CPUMCTX_EXTRN_CR2)
-            {
                 pCtx->cr2 = pVmcbGuest->u64CR2;
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR2);
-            }
 
             if (fWhat & CPUMCTX_EXTRN_CR3)
…
                 {
                     CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
-                    if (VMMRZCallRing3IsEnabled(pVCpu))
-                    {
-                        Log4(("hmR0SvmImportGuestState: Calling PGMUpdateCR3\n"));
-                        PGMUpdateCR3(pVCpu, pVmcbGuest->u64CR3);
-                    }
-                    else
-                    {
-                        Log4(("hmR0SvmImportGuestState: Setting VMCPU_FF_HM_UPDATE_CR3\n"));
-                        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
-                    }
+                    VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
                 }
-                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);
             }
 
             /* Changes to CR4 are always intercepted. */
         }
+
+        /* Update fExtrn. */
+        pCtx->fExtrn &= ~fWhat;
 
         /* If everything has been imported, clear the HM keeper bit. */
         if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
         {
-            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);
+            pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
             Assert(!pCtx->fExtrn);
         }
     }
     else
-        Assert(!pCtx->fExtrn);
+        Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL));
+
+    ASMSetFlags(fSavedFlags);
 
     /*
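Taken together, the changes replace the per-field ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_*) clears with a single plain "pCtx->fExtrn &= ~fWhat;" at the end of the import, made safe by running the whole import with interrupts disabled so a preemption hook cannot observe a half-updated context; in addition, CPUMSetGuestCR0() is now bracketed with VMMRZCallRing3Disable/Enable(), and the CR3 path defers to the VMCPU_FF_HM_UPDATE_CR3 force flag instead of calling PGMUpdateCR3() directly. Below is a minimal standalone sketch of that import pattern; the type and function names (GUESTCTX, VMCBSTATE, importGuestState, intDisableFlags, setFlags) and the bit values are simplified stand-ins for illustration, not the real VBox/IPRT API.

/*
 * Minimal sketch of the import pattern this changeset introduces.  All names
 * and bit values are illustrative stand-ins, not the real VBox/IPRT API.
 */
#include <stdint.h>

#define EXTRN_RIP     UINT64_C(0x0001)
#define EXTRN_RFLAGS  UINT64_C(0x0002)
#define EXTRN_RSP     UINT64_C(0x0004)
#define EXTRN_ALL     (EXTRN_RIP | EXTRN_RFLAGS | EXTRN_RSP)
#define EXTRN_KEEPER  UINT64_C(0x8000)  /* "someone still owns external state" marker */

typedef struct { uint64_t rip, rflags, rsp; } VMCBSTATE;          /* hardware copy */
typedef struct { uint64_t rip, rflags, rsp, fExtrn; } GUESTCTX;   /* software copy */

/* Stand-ins for ASMIntDisableFlags()/ASMSetFlags(). */
static inline uint64_t intDisableFlags(void) { /* save flags + disable interrupts */ return 0; }
static inline void     setFlags(uint64_t fFlags) { (void)fFlags; /* restore saved flags */ }

static void importGuestState(GUESTCTX *pCtx, const VMCBSTATE *pVmcb, uint64_t fWhat)
{
    /* Interrupts off: the fExtrn update below is atomic wrt preemption hooks. */
    uint64_t const fSavedFlags = intDisableFlags();

    fWhat &= pCtx->fExtrn;                  /* import only what is still external */
    if (fWhat)
    {
        if (fWhat & EXTRN_RIP)    pCtx->rip    = pVmcb->rip;
        if (fWhat & EXTRN_RFLAGS) pCtx->rflags = pVmcb->rflags;
        if (fWhat & EXTRN_RSP)    pCtx->rsp    = pVmcb->rsp;

        pCtx->fExtrn &= ~fWhat;             /* one plain clear replaces per-field atomics */
        if (!(pCtx->fExtrn & EXTRN_ALL))    /* everything imported: drop the keeper bit */
            pCtx->fExtrn &= ~EXTRN_KEEPER;
    }

    setFlags(fSavedFlags);
}

With interrupts disabled on the current CPU, nothing can run between the register copies and the final mask update, so the plain (non-atomic) fExtrn write suffices and the imported state and its bookkeeping stay consistent as a unit.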