Changeset 65933 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
Timestamp: Mar 3, 2017 1:21:40 PM
File: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r65913 → r65933)

The change adds IEM implementations of the SVM instructions VMLOAD, VMSAVE and INVLPGA, and factors the EFER.SVME, real/v8086-mode and CPL-0 checks in iemCImpl_clgi and iemCImpl_stgi out into the new IEM_SVM_INSTR_COMMON_CHECKS macro.
The new VMLOAD and VMSAVE handlers are inserted ahead of iemCImpl_clgi:

 #ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Implements 'VMLOAD'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmload)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+    {
+        Log(("vmload: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+    {
+        Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    void *pvVmcb;
+    PGMPAGEMAPLOCK PgLockVmcb;
+    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, &pvVmcb, &PgLockVmcb);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, FS, fs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, GS, gs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, TR, tr);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, LDTR, ldtr);
+
+        pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
+        pCtx->msrSTAR         = pVmcb->guest.u64STAR;
+        pCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;
+        pCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;
+        pCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;
+
+        pCtx->SysEnter.cs  = pVmcb->guest.u64SysEnterCS;
+        pCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP;
+        pCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP;
+
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
+
+
+/**
+ * Implements 'VMSAVE'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmsave)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+    {
+        Log(("vmsave: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMSAVE);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+    {
+        Log(("vmsave: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    void *pvVmcb;
+    PGMPAGEMAPLOCK PgLockVmcb;
+    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, LDTR, ldtr);
+
+        pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
+        pVmcb->guest.u64STAR         = pCtx->msrSTAR;
+        pVmcb->guest.u64LSTAR        = pCtx->msrLSTAR;
+        pVmcb->guest.u64CSTAR        = pCtx->msrCSTAR;
+        pVmcb->guest.u64SFMASK       = pCtx->msrSFMASK;
+
+        pVmcb->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
+        pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
+        pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
+
+        iemMemPageUnmap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_R, pvVmcb, &PgLockVmcb);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+}
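The segment-register copying relies on a pair of helpers defined elsewhere in the tree; this changeset only uses them. As a rough sketch, assuming the FROM direction simply mirrors selector, hidden base/limit and attributes from the VMCB state-save area into CPUMCTX (the real definition also has to deal with SVM's packed attribute encoding), HMSVM_SEG_REG_COPY_FROM_VMCB might expand to something like:

/* Hypothetical sketch only -- not the real definition from the VBox headers. */
#define HMSVM_SEG_REG_COPY_FROM_VMCB(a_pCtx, a_pVmcb, a_REG, a_reg) \
    do { \
        (a_pCtx)->a_reg.Sel      = (a_pVmcb)->guest.a_REG.u16Sel;   /* visible selector */ \
        (a_pCtx)->a_reg.ValidSel = (a_pVmcb)->guest.a_REG.u16Sel;   /* mark hidden part valid */ \
        (a_pCtx)->a_reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
        (a_pCtx)->a_reg.u64Base  = (a_pVmcb)->guest.a_REG.u64Base; \
        (a_pCtx)->a_reg.u32Limit = (a_pVmcb)->guest.a_REG.u32Limit; \
        (a_pCtx)->a_reg.Attr.u   = (a_pVmcb)->guest.a_REG.u16Attr;  /* attribute unpacking elided */ \
    } while (0)

The _TO_ direction used by VMSAVE would be the mirror image, writing the CPUMCTX fields back into the VMCB.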
In iemCImpl_clgi and iemCImpl_stgi the open-coded checks are replaced by the new macro, and the intercept log statement drops from Log2 to Log:

 /**
  * Implements 'CLGI'.
  */
…
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
-    {
-        Log2(("clgi: EFER.SVME not enabled -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
-    {
-        Log2(("clgi: Real or v8086 mode -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (pVCpu->iem.s.uCpl != 0)
-    {
-        Log2(("clgi: CPL != 0 -> #GP(0)\n"));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
 #ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     {
-        Log2(("clgi: Guest intercept -> VMexit\n"));
+        Log(("clgi: Guest intercept -> VMexit\n"));
         HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
         return VINF_EM_RESCHEDULE;
…
 {
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
-    {
-        Log2(("stgi: EFER.SVME not enabled -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
-    {
-        Log2(("stgi: Real or v8086 mode -> #UD\n"));
-        return iemRaiseUndefinedOpcode(pVCpu);
-    }
-    if (pVCpu->iem.s.uCpl != 0)
-    {
-        Log2(("stgi: CPL != 0 -> #GP(0)\n"));
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-    }
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
 #ifndef IN_RC
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
…
 
     pCtx->hwvirt.svm.fGif = 1;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
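IEM_SVM_INSTR_COMMON_CHECKS itself is not visible in this changeset, but the lines it replaces spell out exactly what it must do. A sketch pieced together from the removed checks (the real definition lives in the IEM headers, and a_Instr presumably just feeds the log prefix):

/* Sketch reconstructed from the checks removed above -- not the actual definition. */
#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
    do { \
        if (!(IEM_GET_CTX(a_pVCpu)->msrEFER & MSR_K6_EFER_SVME)) \
        { \
            Log2((#a_Instr ": EFER.SVME not enabled -> #UD\n")); \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
        { \
            Log2((#a_Instr ": Real or v8086 mode -> #UD\n")); \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
        if ((a_pVCpu)->iem.s.uCpl != 0) \
        { \
            Log2((#a_Instr ": CPL != 0 -> #GP(0)\n")); \
            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
        } \
    } while (0)

Factoring the checks into one macro keeps the four SVM instruction handlers (vmload, vmsave, clgi, stgi, plus invlpga below) from repeating the same fifteen lines.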
Finally, iemCImpl_invlpga is appended right after iemCImpl_stgi:

+/**
+ * Implements 'INVLPGA'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_invlpga)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+    {
+        Log2(("invlpga: Guest intercept -> VMexit\n"));
+        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
+        return VINF_EM_RESCHEDULE;
+    }
+#endif
+
+    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    /** @todo PGM needs virtual ASID support. */
+#if 0
+    uint32_t const uAsid = pCtx->ecx;
+#endif
+    PGMInvalidatePage(pVCpu, GCPtrPage);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     return VINF_SUCCESS;
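For context on what is being emulated: INVLPGA takes its operands implicitly, the linear address in rAX (eAX outside 64-bit mode, which is what the enmEffAddrMode check above models) and the ASID in ECX. A hypothetical guest-side wrapper, encoding the instruction as raw bytes since not every assembler knows the mnemonic:

#include <stdint.h>

/* Hypothetical guest-side helper, for illustration only: flush the TLB entry
   for pvPage under uAsid. 0F 01 DF is the INVLPGA encoding; rAX and ECX are
   the instruction's implicit operands. */
static inline void SvmInvlpga(void *pvPage, uint32_t uAsid)
{
    __asm__ __volatile__(".byte 0x0f, 0x01, 0xdf" /* invlpga rAX, ECX */
                         : : "a" (pvPage), "c" (uAsid) : "memory");
}

Since PGM has no virtual ASID support yet (the @todo above), the emulation ignores ECX and simply invalidates the page for the current context.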