Changeset 72484 in vbox

Timestamp: Jun 8, 2018 5:05:40 PM
Location:  trunk
Files:     14 edited
trunk/include/VBox/vmm/cpumctx.h (r72415 → r72484)

…
  /** The RIP register value is kept externally. */
  #define CPUMCTX_EXTRN_RIP             UINT64_C(0x0000000000000004)
+ /** The RFLAGS register values are kept externally. */
+ #define CPUMCTX_EXTRN_RFLAGS          UINT64_C(0x0000000000000008)
+
+ /** The RAX register value is kept externally. */
+ #define CPUMCTX_EXTRN_RAX             UINT64_C(0x0000000000000010)
+ /** The RCX register value is kept externally. */
+ #define CPUMCTX_EXTRN_RCX             UINT64_C(0x0000000000000020)
+ /** The RDX register value is kept externally. */
+ #define CPUMCTX_EXTRN_RDX             UINT64_C(0x0000000000000040)
+ /** The RBX register value is kept externally. */
+ #define CPUMCTX_EXTRN_RBX             UINT64_C(0x0000000000000080)
+ /** The RSP register value is kept externally. */
+ #define CPUMCTX_EXTRN_RSP             UINT64_C(0x0000000000000100)
+ /** The RBP register value is kept externally. */
+ #define CPUMCTX_EXTRN_RBP             UINT64_C(0x0000000000000200)
+ /** The RSI register value is kept externally. */
+ #define CPUMCTX_EXTRN_RSI             UINT64_C(0x0000000000000400)
+ /** The RDI register value is kept externally. */
+ #define CPUMCTX_EXTRN_RDI             UINT64_C(0x0000000000000800)
+ /** The R8 thru R15 register values are kept externally. */
+ #define CPUMCTX_EXTRN_R8_R15          UINT64_C(0x0000000000001000)
+ /** General purpose registers mask. */
+ #define CPUMCTX_EXTRN_GPRS_MASK       UINT64_C(0x0000000000001ff0)
+
+ /** The ES register values are kept externally. */
+ #define CPUMCTX_EXTRN_ES              UINT64_C(0x0000000000002000)
  /** The CS register values are kept externally. */
- #define CPUMCTX_EXTRN_CS              UINT64_C(0x0000000000000008)
- /** The RFLAGS register values are kept externally. */
- #define CPUMCTX_EXTRN_RFLAGS          UINT64_C(0x0000000000000010)
-
- /** The RAX register value is kept externally. */
- #define CPUMCTX_EXTRN_RAX             UINT64_C(0x0000000000000020)
- /** The RCX register value is kept externally. */
- #define CPUMCTX_EXTRN_RCX             UINT64_C(0x0000000000000040)
- /** The RDX register value is kept externally. */
- #define CPUMCTX_EXTRN_RDX             UINT64_C(0x0000000000000080)
- /** The RBX register value is kept externally. */
- #define CPUMCTX_EXTRN_RBX             UINT64_C(0x0000000000000100)
- /** The RSP register value is kept externally. */
- #define CPUMCTX_EXTRN_RSP             UINT64_C(0x0000000000000200)
- /** The RBP register value is kept externally. */
- #define CPUMCTX_EXTRN_RBP             UINT64_C(0x0000000000000400)
- /** The RSI register value is kept externally. */
- #define CPUMCTX_EXTRN_RSI             UINT64_C(0x0000000000000800)
- /** The RDI register value is kept externally. */
- #define CPUMCTX_EXTRN_RDI             UINT64_C(0x0000000000001000)
- /** The R8 thru R15 register values are kept externally. */
- #define CPUMCTX_EXTRN_R8_R15          UINT64_C(0x0000000000002000)
- /** General purpose registers mask. */
- #define CPUMCTX_EXTRN_GPRS_MASK       UINT64_C(0x0000000000003fe0)
-
+ #define CPUMCTX_EXTRN_CS              UINT64_C(0x0000000000004000)
  /** The SS register values are kept externally. */
- #define CPUMCTX_EXTRN_SS              UINT64_C(0x0000000000004000)
+ #define CPUMCTX_EXTRN_SS              UINT64_C(0x0000000000008000)
  /** The DS register values are kept externally. */
- #define CPUMCTX_EXTRN_DS              UINT64_C(0x0000000000008000)
- /** The ES register values are kept externally. */
- #define CPUMCTX_EXTRN_ES              UINT64_C(0x0000000000010000)
+ #define CPUMCTX_EXTRN_DS              UINT64_C(0x0000000000010000)
  /** The FS register values are kept externally. */
  #define CPUMCTX_EXTRN_FS              UINT64_C(0x0000000000020000)
…
  #define CPUMCTX_EXTRN_GS              UINT64_C(0x0000000000040000)
  /** Segment registers (includes CS). */
- #define CPUMCTX_EXTRN_SREG_MASK       UINT64_C(0x000000000007c008)
+ #define CPUMCTX_EXTRN_SREG_MASK       UINT64_C(0x000000000007e000)
+ /** Converts a X86_SREG_XXX index to a CPUMCTX_EXTRN_xS mask. */
+ #define CPUMCTX_EXTRN_SREG_FROM_IDX(a_SRegIdx)  RT_BIT_64((a_SRegIdx) + 13)
+ #ifndef VBOX_FOR_DTRACE_LIB
+ AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_ES) == CPUMCTX_EXTRN_ES);
+ AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_CS) == CPUMCTX_EXTRN_CS);
+ AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_DS) == CPUMCTX_EXTRN_DS);
+ AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_FS) == CPUMCTX_EXTRN_FS);
+ AssertCompile(CPUMCTX_EXTRN_SREG_FROM_IDX(X86_SREG_GS) == CPUMCTX_EXTRN_GS);
+ #endif

  /** The GDTR register values are kept externally. */
…
  /** Control register mask. */
  #define CPUMCTX_EXTRN_CR_MASK         UINT64_C(0x0000000007800000)
+ /** The TPR/CR8 register value is kept externally. */
+ #define CPUMCTX_EXTRN_APIC_TPR        UINT64_C(0x0000000008000000)
  /** The EFER register value is kept externally. */
- #define CPUMCTX_EXTRN_EFER            UINT64_C(0x0000000008000000)
+ #define CPUMCTX_EXTRN_EFER            UINT64_C(0x0000000010000000)

  /** The DR0, DR1, DR2 and DR3 register values are kept externally. */
- #define CPUMCTX_EXTRN_DR0_DR3         UINT64_C(0x0000000010000000)
+ #define CPUMCTX_EXTRN_DR0_DR3         UINT64_C(0x0000000020000000)
  /** The DR6 register value is kept externally. */
- #define CPUMCTX_EXTRN_DR6             UINT64_C(0x0000000020000000)
+ #define CPUMCTX_EXTRN_DR6             UINT64_C(0x0000000040000000)
  /** The DR7 register value is kept externally. */
- #define CPUMCTX_EXTRN_DR7             UINT64_C(0x0000000040000000)
+ #define CPUMCTX_EXTRN_DR7             UINT64_C(0x0000000080000000)
  /** Debug register mask. */
- #define CPUMCTX_EXTRN_DR_MASK         UINT64_C(0x0000000070000000)
+ #define CPUMCTX_EXTRN_DR_MASK         UINT64_C(0x00000000e0000000)

  /** The XSAVE_C_X87 state is kept externally. */
- #define CPUMCTX_EXTRN_X87             UINT64_C(0x0000000080000000)
+ #define CPUMCTX_EXTRN_X87             UINT64_C(0x0000000100000000)
  /** The XSAVE_C_SSE, XSAVE_C_YMM, XSAVE_C_ZMM_HI256, XSAVE_C_ZMM_16HI and
   * XSAVE_C_OPMASK state is kept externally. */
- #define CPUMCTX_EXTRN_SSE_AVX         UINT64_C(0x0000000100000000)
+ #define CPUMCTX_EXTRN_SSE_AVX         UINT64_C(0x0000000200000000)
  /** The state of XSAVE components not covered by CPUMCTX_EXTRN_X87 and
   * CPUMCTX_EXTRN_SSE_AVX is kept externally. */
- #define CPUMCTX_EXTRN_OTHER_XSAVE     UINT64_C(0x0000000200000000)
+ #define CPUMCTX_EXTRN_OTHER_XSAVE     UINT64_C(0x0000000400000000)
  /** The state of XCR0 and XCR1 register values are kept externally. */
- #define CPUMCTX_EXTRN_XCRx            UINT64_C(0x0000000400000000)
+ #define CPUMCTX_EXTRN_XCRx            UINT64_C(0x0000000800000000)
+

  /** The KERNEL GS BASE MSR value is kept externally. */
- #define CPUMCTX_EXTRN_KERNEL_GS_BASE  UINT64_C(0x0000000800000000)
+ #define CPUMCTX_EXTRN_KERNEL_GS_BASE  UINT64_C(0x0000001000000000)
  /** The STAR, LSTAR, CSTAR and SFMASK MSR values are kept externally. */
- #define CPUMCTX_EXTRN_SYSCALL_MSRS    UINT64_C(0x0000001000000000)
+ #define CPUMCTX_EXTRN_SYSCALL_MSRS    UINT64_C(0x0000002000000000)
  /** The SYSENTER_CS, SYSENTER_EIP and SYSENTER_ESP MSR values are kept externally. */
- #define CPUMCTX_EXTRN_SYSENTER_MSRS   UINT64_C(0x0000002000000000)
- /** The SYSENTER_CS, SYSENTER_EIP and SYSENTER_ESP MSR values are kept externally. */
- #define CPUMCTX_EXTRN_TSC_AUX         UINT64_C(0x0000004000000000)
+ #define CPUMCTX_EXTRN_SYSENTER_MSRS   UINT64_C(0x0000004000000000)
+ /** The TSC_AUX MSR is kept externally. */
+ #define CPUMCTX_EXTRN_TSC_AUX         UINT64_C(0x0000008000000000)
  /** All other stateful MSRs not covered by CPUMCTX_EXTRN_EFER,
   * CPUMCTX_EXTRN_KERNEL_GS_BASE, CPUMCTX_EXTRN_SYSCALL_MSRS,
   * CPUMCTX_EXTRN_SYSENTER_MSRS, and CPUMCTX_EXTRN_TSC_AUX. */
- #define CPUMCTX_EXTRN_OTHER_MSRS      UINT64_C(0x0000008000000000)
+ #define CPUMCTX_EXTRN_OTHER_MSRS      UINT64_C(0x0000010000000000)

  /** Mask of all the MSRs. */
…
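The segment-register bits were reshuffled above precisely so that they follow the X86_SREG_XXX index order (ES=0 at bit 13 through GS=5 at bit 18), which is what makes CPUMCTX_EXTRN_SREG_FROM_IDX possible. A minimal sketch of what that buys, assuming the VBox headers this changeset touches (the helper name is illustrative, not part of the commit):

    #include <VBox/vmm/cpumctx.h>   /* CPUMCTX_EXTRN_XXX, CPUMCTX::fExtrn */

    /* Illustrative helper: with the new layout, "is segment register
     * iSegReg still held by the execution engine?" becomes one
     * shift-and-test instead of a per-register switch, since
     * CPUMCTX_EXTRN_SREG_FROM_IDX(i) == RT_BIT_64((i) + 13). */
    static bool exampleIsSRegExternal(PCCPUMCTX pCtx, uint8_t iSegReg)
    {
        Assert(iSegReg < X86_SREG_COUNT);
        return RT_BOOL(pCtx->fExtrn & CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    }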
trunk/include/VBox/vmm/iem.h (r72209 → r72484)

…
+ /** The CPUMCTX_EXTRN_XXX mask required to be cleared when interpreting anything.
+  * IEM will ASSUME the caller of IEM APIs has ensured these are already present. */
+ #define IEM_CPUMCTX_EXTRN_MUST_MASK  (  CPUMCTX_EXTRN_GPRS_MASK \
+                                       | CPUMCTX_EXTRN_RIP \
+                                       | CPUMCTX_EXTRN_RFLAGS \
+                                       | CPUMCTX_EXTRN_SS \
+                                       | CPUMCTX_EXTRN_CS \
+                                       | CPUMCTX_EXTRN_CR0 \
+                                       | CPUMCTX_EXTRN_CR3 \
+                                       | CPUMCTX_EXTRN_CR4 \
+                                       | CPUMCTX_EXTRN_APIC_TPR \
+                                       | CPUMCTX_EXTRN_EFER \
+                                       | CPUMCTX_EXTRN_DR7 )
+ /** The CPUMCTX_EXTRN_XXX mask needed when injecting an exception/interrupt.
+  * IEM will import missing bits, callers are encouraged to make these registers
+  * available prior to injection calls if fetching state anyway. */
+ #define IEM_CPUMCTX_EXTRN_XCPT_MASK  (  IEM_CPUMCTX_EXTRN_MUST_MASK \
+                                       | CPUMCTX_EXTRN_CR2 \
+                                       | CPUMCTX_EXTRN_SREG_MASK \
+                                       | CPUMCTX_EXTRN_TABLE_MASK )
+
+
  VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu);
  VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten);
…
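The two masks split the import contract in half: the MUST bits are a caller precondition, while the XCPT bits may be fetched lazily. A hedged sketch of the caller side, assuming NEM is the state keeper — the wrapper is hypothetical, only NEMImportStateOnDemand and IEMExecOne are real APIs from this changeset:

    /* Hypothetical wrapper: guarantee IEM's precondition, then let it run.
     * Only bits that are still external are requested from the keeper. */
    static VBOXSTRICTRC exampleRunIem(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        uint64_t const fMissing = pCtx->fExtrn & IEM_CPUMCTX_EXTRN_MUST_MASK;
        if (fMissing)
        {
            int rc = NEMImportStateOnDemand(pVCpu, pCtx, fMissing);
            AssertRCReturn(rc, rc);
        }
        /* Exception-related state (CR2, all sregs, the descriptor tables)
         * can stay external; IEM imports it on demand via iemCtxImport. */
        return IEMExecOne(pVCpu);
    }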
trunk/include/VBox/vmm/nem.h (r72343 → r72484)

…
  */
  VMM_INT_DECL(bool) NEMHCIsLongModeAllowed(PVM pVM);
+ VMM_INT_DECL(int)  NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);
+
  VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb);
  VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
…
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r72358 → r72484)

…
  pVCpu->cpum.s.Guest.gdtr.cbGdt = cbLimit;
  pVCpu->cpum.s.Guest.gdtr.pGdt  = GCPtrBase;
+ pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_GDTR;
  pVCpu->cpum.s.fChanged |= CPUM_CHANGED_GDTR;
  return VINF_SUCCESS; /* formality, consider it void. */
…
  pVCpu->cpum.s.Guest.idtr.cbIdt = cbLimit;
  pVCpu->cpum.s.Guest.idtr.pIdt  = GCPtrBase;
+ pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_IDTR;
  pVCpu->cpum.s.fChanged |= CPUM_CHANGED_IDTR;
  return VINF_SUCCESS; /* formality, consider it void. */
…
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r72451 → r72484)

…
  #include <VBox/vmm/em.h>
  #include <VBox/vmm/hm.h>
+ #include <VBox/vmm/nem.h>
  #include <VBox/vmm/gim.h>
  #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
…
  PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);

+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
  Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
…
  PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);

+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);
  Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
…
  {
  #ifdef IN_RING3
- //__debugbreak();
  for (;;)
  {
…
  Assert(uCpl < 4);

+ IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
  switch (pCtx->tr.Attr.n.u4Type)
  {
…
  *puRsp = 0; /* make gcc happy */

+ IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
  AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
…
  {
  case X86_XCPT_DB:
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_DR7);
  pCtx->dr[7] &= ~X86_DR7_GD;
  break;
…
  {
  NOREF(uErr); NOREF(uCr2);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  /*
…
  Assert(!IEM_IS_REAL_MODE(pVCpu));
  Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
…
  uint64_t uCr2)
  {
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+
  /*
   * Read the IDT entry.
…
  uint64_t uCr2)
  {
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+
  /*
   * Read the IDT entry.
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+
+ /*
+  * Get all the state that we might need here.
+  */
  #ifdef IN_RING0
  int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
  AssertRCReturn(rc, rc);
  #endif
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
…
  Assert(iSegReg < X86_SREG_COUNT);
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
…
  Assert(iSegReg < X86_SREG_COUNT);
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  return &pCtx->aSRegs[iSegReg].Sel;
  }
…
  {
  Assert(iSegReg < X86_SREG_COUNT);
- return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return pCtx->aSRegs[iSegReg].Sel;
  }
…
  {
  Assert(iSegReg < X86_SREG_COUNT);
- return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+ return pCtx->aSRegs[iSegReg].u64Base;
  }
…
  Assert(iSegReg < X86_SREG_COUNT);
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  return &pCtx->aSRegs[iSegReg].u64Base;
  }
…
  CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeForRead(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeForChange(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeSseForRead(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeForChange(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeAvxForRead(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  CPUMRZFpuStateActualizeForChange(pVCpu);
  #endif
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
  }
…
  iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
  {
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+
  if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
      *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
…
  iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
  {
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
+
  if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
      *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
…
  return VINF_SUCCESS;

+ IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
  switch (pVCpu->iem.s.enmCpuMode)
…
  }

- #endif
+ #endif /* IEM_WITH_SETJMP */

  #ifndef IN_RING3
…
  if (iSegReg >= X86_SREG_FS)
  {
+ IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
  GCPtrMem += pSel->u64Base;
…
  else
  {
+ IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
  if (   (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
…
  if (iSegReg >= X86_SREG_FS)
  {
+ IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
  GCPtrMem += pSel->u64Base;
…
  else
  {
+ IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
  uint32_t const fRelevantAttrs = pSel->Attr.u & (  X86DESCATTR_P | X86DESCATTR_UNUSABLE
…
  AssertPtr(pDesc);
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);

  /** @todo did the 286 require all 8 bytes to be accessible? */
…
  #define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg)        (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
  #define IEM_MC_FETCH_GREG_U64_ZX_U64                    IEM_MC_FETCH_GREG_U64
- #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg)        (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
- #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg)     (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
- #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg)     (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
- #define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg)   (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
- #define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg)   (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
+ #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+ #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+ #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
+ } while (0)
+ /** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
+ #define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
+ } while (0)
+ #define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
+ } while (0)
  #define IEM_MC_FETCH_CR0_U16(a_u16Dst)                  (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
  #define IEM_MC_FETCH_CR0_U32(a_u32Dst)                  (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
  #define IEM_MC_FETCH_CR0_U64(a_u64Dst)                  (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
- #define IEM_MC_FETCH_LDTR_U16(a_u16Dst)                 (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
- #define IEM_MC_FETCH_LDTR_U32(a_u32Dst)                 (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
- #define IEM_MC_FETCH_LDTR_U64(a_u64Dst)                 (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
- #define IEM_MC_FETCH_TR_U16(a_u16Dst)                   (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
- #define IEM_MC_FETCH_TR_U32(a_u32Dst)                   (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
- #define IEM_MC_FETCH_TR_U64(a_u64Dst)                   (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
+ /** @todo IEM_MC_FETCH_LDTR_U16, IEM_MC_FETCH_LDTR_U32, IEM_MC_FETCH_LDTR_U64, IEM_MC_FETCH_TR_U16, IEM_MC_FETCH_TR_U32, and IEM_MC_FETCH_TR_U64 aren't worth it... */
+ #define IEM_MC_FETCH_LDTR_U16(a_u16Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
+     (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel; \
+ } while (0)
+ #define IEM_MC_FETCH_LDTR_U32(a_u32Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
+     (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel; \
+ } while (0)
+ #define IEM_MC_FETCH_LDTR_U64(a_u64Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
+     (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel; \
+ } while (0)
+ #define IEM_MC_FETCH_TR_U16(a_u16Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
+     (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel; \
+ } while (0)
+ #define IEM_MC_FETCH_TR_U32(a_u32Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
+     (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel; \
+ } while (0)
+ #define IEM_MC_FETCH_TR_U64(a_u64Dst) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
+     (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel; \
+ } while (0)
  /** @note Not for IOPL or IF testing or modification. */
  #define IEM_MC_FETCH_EFLAGS(a_EFlags)                   (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
…
  #define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)             *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
  #define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst)    do { (a_pu32Dst)[1] = 0; } while (0)
- #define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value)  *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
- #define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value)  *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
+ /** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
+ #define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
+ } while (0)
+ #define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
+     IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
+     *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
+ } while (0)
  #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
      do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
…

+ /**
+  * Used to dynamically import state residing in NEM or HM.
+  *
+  * @returns VBox status code.
+  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
+  * @param   pCtx            The CPU context structure.
+  * @param   fExtrnImport    The fields to import.
+  */
+ int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport)
+ {
+     switch (pCtx->fExtrn & CPUMCTX_EXTRN_KEEPER_MASK)
+     {
+ #ifndef IN_RC
+         case CPUMCTX_EXTRN_KEEPER_NEM:
+         {
+             int rc = NEMImportStateOnDemand(pVCpu, pCtx, fExtrnImport);
+             Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
+             return rc;
+         }
+
+         case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */
+ #endif
+
+         default:
+             AssertLogRelMsgFailed(("%RX64\n", fExtrnImport));
+ #ifdef IN_RC
+             RT_NOREF_PV(pVCpu); RT_NOREF_PV(fExtrnImport);
+ #endif
+             return VERR_IEM_IPE_9;
+     }
+ }
+
+
  /** @} */
…
  PVMCPU pVCpu = pVCpu;
  VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
- # ifdef IEM_VERIFICATION_MODE_FULL_HM
+ #  ifdef IEM_VERIFICATION_MODE_FULL_HM
  if (   HMIsEnabled(pVM)
      && pVCpu->iem.s.cIOReads == 0
…
  rc = VINF_SUCCESS;
  }
- # endif
+ #  endif
  if (   rc == VERR_EM_CANNOT_EXEC_GUEST
      || rc == VINF_IOM_R3_IOPORT_READ
…
  PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);

- # if 1 /* The recompiler doesn't update these the intel way. */
+ #  if 1 /* The recompiler doesn't update these the intel way. */
  if (fRem)
  {
…
  pOrgXState->x87.FSW = pDebugXState->x87.FSW;
  }
- # endif
+ #  endif
  if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
  {
…
  RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
  }
- #endif
+ #endif /* LOG_ENABLED */

…
  if (pVCpu->iem.s.cActiveMappings > 0)
      iemMemRollback(pVCpu);
+
  return rcStrict;
  }
…

  VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
-
- #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
  if (rcStrict == VINF_SVM_VMEXIT)
      rcStrict = VINF_SUCCESS;
- #endif
+ # endif

  /** @todo Are there any other codes that imply the event was successfully
  if (   rcStrict == VINF_SUCCESS
      || rcStrict == VINF_IEM_RAISED_XCPT)
- {
  TRPMResetTrap(pVCpu);
- }
+
  return rcStrict;
  #endif
…


  #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+
  /**
   * Interface for HM and EM to emulate the CLGI instruction.
…
  VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
  {
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);
  VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
  return iemExecStatusCodeFiddling(pVCpu, rcStrict);
  }
+
  #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
-
  #ifdef IN_RING3
…
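The new iemCtxImport above is the single funnel through which IEM pulls in externally-kept state. For reference, this is what the import macros it pairs with (quoted in full under IEMInternal.h below) expand to at a call site; the expansion is shown for illustration only:

    /* Illustrative expansion of IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_TR):
     * only when the TR bit is still set in fExtrn does the slower keeper
     * call happen; the common case stays a single test-and-branch. */
    if (!(pCtx->fExtrn & CPUMCTX_EXTRN_TR))
    { /* likely - TR already present in pCtx */ }
    else
    {
        int rcCtxImport = iemCtxImport(pVCpu, pCtx, CPUMCTX_EXTRN_TR);
        AssertRCReturn(rcCtxImport, rcCtxImport);
    }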
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r72451 → r72484)

…
  AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
  AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
+
+ IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR);

  /*
…
  Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
  #endif
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_MASK);

  if (   uCpl > pSReg->Attr.n.u2Dpl
…
  || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
  RT_NOREF_PV(enmEffOpSize);
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  if (   pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
…
  }

- PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  uint32_t uNextEip = pCtx->eip + cbInstr;
  return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
…
  Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
  RT_NOREF_PV(enmEffOpSize);
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  if (   pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl
…
  }

- PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  uint32_t uNextEip = pCtx->eip + cbInstr;
  return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
…
  #else
  RT_NOREF_PV(enmEffOpSize);
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);

  /* NB: Far jumps can only do intra-privilege transfers. Far calls support
…
  return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
  }
-
- PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

  if (enmBranch == IEMBRANCH_JUMP)
…
  Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
  Assert((uSel & X86_SEL_MASK_OFF_RPL));
+ IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_XCPT_MASK);

  if (IEM_IS_LONG_MODE(pVCpu))
…
  return iemRaiseGeneralProtectionFault0(pVCpu);
  }
+
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);

  /* Fetch the descriptor. */
…
  {
  RT_NOREF_PV(cbInstr);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_MASK);

  /*
…
  if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
      return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
+ IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
  if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
      return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
…
  return iemRaiseUndefinedOpcode(pVCpu);
  }
+
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SYSCALL_MSRS);

  /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SYSCALL_MSRS);
+
  /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
  uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
…
  IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
  {
- /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/
+ PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
  uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
  PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
…
  if (uSel & X86_SEL_LDT)
  {
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_LDTR);
  if (   !pCtx->ldtr.Attr.n.u1Present
      || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
…
  else
  {
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR);
  if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
      return VINF_IEM_SELECTOR_NOT_OK;
…
  pCtx->gdtr.cbGdt = cbLimit;
  pCtx->gdtr.pGdt  = GCPtrBase;
+ pCtx->fExtrn &= ~CPUMCTX_EXTRN_GDTR;
  }
  if (rcStrict == VINF_SUCCESS)
…

  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR);
  VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
  if (rcStrict == VINF_SUCCESS)
…
  pCtx->idtr.cbIdt = cbLimit;
  pCtx->idtr.pIdt  = GCPtrBase;
+ pCtx->fExtrn &= ~CPUMCTX_EXTRN_IDTR;
  }
  iemRegAddToRipAndClearRF(pVCpu, cbInstr);
…

  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_IDTR);
  VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);
  if (rcStrict == VINF_SUCCESS)
…
  }

- Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
+ Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
+ pCtx->fExtrn &= ~CPUMCTX_EXTRN_LDTR;
  if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
      CPUMSetGuestLDTR(pVCpu, uNewLdt);
…
   * Read the descriptor.
   */
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
  IEMSELDESC Desc;
  VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
…
   * Read the descriptor.
   */
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
  IEMSELDESC Desc;
  VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
…
  {
  case 0:
+     IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0);
      crX = pCtx->cr0;
      if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
          crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
      break;
- case 2: crX = pCtx->cr2; break;
- case 3: crX = pCtx->cr3; break;
- case 4: crX = pCtx->cr4; break;
+ case 2:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_CR2);
+     crX = pCtx->cr2;
+     break;
+ case 3:
+     IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR3);
+     crX = pCtx->cr3;
+     break;
+ case 4:
+     IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
+     crX = pCtx->cr4;
+     break;
  case 8:
  {
+     IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_APIC_TPR);
  #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
      if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
…
   * Perform checks.
   */
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0);
+ pCtx->fExtrn &= ~CPUMCTX_EXTRN_LDTR;
+
  uint64_t const uOldCrX = pCtx->cr0;
  uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
…
  }
  pCtx->cr2 = uNewCrX;
+ pCtx->fExtrn &= ~CPUMCTX_EXTRN_CR2;
  rcStrict = VINF_SUCCESS;
  break;
…
  case 3:
  {
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR3);
+
  /* clear bit 63 from the source operand and indicate no invalidations are required. */
  if (   (pCtx->cr4 & X86_CR4_PCIDE)
…
  case 4:
  {
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
  uint64_t const uOldCrX = pCtx->cr4;
…
  case 8:
  {
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_APIC_TPR);
  if (uNewCrX & ~(uint64_t)0xf)
  {
…
   * Compose the new CR0 value and call common worker.
   */
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0);
  uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
  uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
…

  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0);
  uint64_t uNewCr0 = pCtx->cr0;
  uNewCr0 &= ~X86_CR0_TS;
…
  return iemRaiseGeneralProtectionFault0(pVCpu);
  Assert(!pCtx->eflags.Bits.u1VM);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0);

  if (   (iDrReg == 4 || iDrReg == 5)
…
  switch (iDrReg)
  {
- case 0: drX = pCtx->dr[0]; break;
- case 1: drX = pCtx->dr[1]; break;
- case 2: drX = pCtx->dr[2]; break;
- case 3: drX = pCtx->dr[3]; break;
+ case 0:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3);
+     drX = pCtx->dr[0];
+     break;
+ case 1:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3);
+     drX = pCtx->dr[1];
+     break;
+ case 2:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3);
+     drX = pCtx->dr[2];
+     break;
+ case 3:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3);
+     drX = pCtx->dr[3];
+     break;
  case 6:
  case 4:
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR6);
      drX = pCtx->dr[6];
      drX |= X86_DR6_RA1_MASK;
…
  case 7:
  case 5:
+     IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_DR7);
      drX = pCtx->dr[7];
      drX |= X86_DR7_RA1_MASK;
…
  return iemRaiseGeneralProtectionFault0(pVCpu);
  Assert(!pCtx->eflags.Bits.u1VM);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4);

  if (iDrReg == 4 || iDrReg == 5)
…
   * Do the actual setting.
   */
+ if (iDrReg < 4)
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3);
+ else if (iDrReg == 6)
+     IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR6);
  if (!IEM_VERIFICATION_ENABLED(pVCpu))
  {
…
  return iemRaiseGeneralProtectionFault0(pVCpu);
  Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

  if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
…
  return iemRaiseGeneralProtectionFault0(pVCpu);
  }
+ IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

  /*
…
  return iemRaiseUndefinedOpcode(pVCpu);

+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
  if (   (pCtx->cr4 & X86_CR4_TSD)
      && pVCpu->iem.s.uCpl != 0)
…
  return iemRaiseUndefinedOpcode(pVCpu);

+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
  if (   (pCtx->cr4 & X86_CR4_TSD)
      && pVCpu->iem.s.uCpl != 0)
…
   * Query the MSR first in case of trips to ring-3.
   */
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_TSC_AUX);
  VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
  if (rcStrict == VINF_SUCCESS)
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
+
  if (   pVCpu->iem.s.uCpl != 0
      && !(pCtx->cr4 & X86_CR4_PCE))
…
  }
  #endif
+
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ALL_MSRS);

  rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u);
…
  #endif

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ALL_MSRS);
+
  if (!IEM_VERIFICATION_ENABLED(pVCpu))
      rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u);
…
  || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
  {
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
  rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
  if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
…
  || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM))))
  {
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
  rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);
  if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
…
  uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
  uint32_t const fEflOld = fEfl;
+
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
  if (pCtx->cr0 & X86_CR0_PE)
  {
…
  uint32_t const fEflOld = fEfl;

+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
  if (pCtx->cr0 & X86_CR0_PE)
  {
…
   */
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_OTHER_MSRS);
  uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE;
  pCtx->msrKERNELGSBASE = pCtx->gs.u64Base;
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ALL_MSRS);
  CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
  pCtx->rax &= UINT32_C(0xffffffff);
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4);
  if (pCtx->cr4 & X86_CR4_OSXSAVE)
  {
…

  }
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_XCRx);
  pCtx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);
  pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);
…
  if (pVCpu->iem.s.uCpl == 0)
  {
+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_XCRx);
+
  uint32_t uEcx = pCtx->ecx;
  uint64_t uNewValue = RT_MAKE_U64(pCtx->eax, pCtx->edx);
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);

  if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);

  /*
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);

  /*
…
  static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
  {
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
  PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87;
  if (enmEffOpSize == IEMMODE_16BIT)
…
  static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
  {
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
  PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87;
  if (enmEffOpSize == IEMMODE_16BIT)
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
+
  RTPTRUNION uPtr;
  VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);

  /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
…
  {
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);

  PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
…
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  Assert(iStReg < 8);
+ IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);

  /*
…
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h (r72209 → r72484)

…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+
  PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
  uint64_t uSrc1Base;
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+
  PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
  uint64_t uSrc1Base;
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
  uint64_t uBaseAddr;
  VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
  uint64_t uBaseAddr;
  VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+
  PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
  uint64_t uSrcBase;
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
+
  uint64_t uBaseAddr;
  VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
…
  }

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
  PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
  uint64_t uBaseAddr;
…
  PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

+ IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
+
  /*
   * Setup.
…
  if (!fIoChecked)
  {
+     /** @todo check if this is too early for ecx=0. */
      rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8);
      if (rcStrict != VINF_SUCCESS)
…
  if (!fIoChecked)
  {
+     /** @todo check if this is too early for ecx=0. */
      rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8);
      if (rcStrict != VINF_SUCCESS)
…
trunk/src/VBox/VMM/VMMAll/NEMAll.cpp (r72343 → r72484)

…
  }

+
+ #ifndef VBOX_WITH_NATIVE_NEM
+ VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
+ {
+     RT_NOREF(pVCpu, pCtx, fWhat);
+     return VERR_NEM_IPE_9;
+ }
+ #endif
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (r72472 → r72484)

…
  ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
  }
-
- /** @todo CR8/TPR */
- ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+     ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

  /* Debug registers. */
…
  aenmNames[iReg++] = WHvX64RegisterCr4;
  }
- aenmNames[iReg++] = WHvX64RegisterCr8; /// @todo CR8/TPR
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+     aenmNames[iReg++] = WHvX64RegisterCr8;

  /* Debug registers. */
…
  }
  }
-
- /// @todo CR8/TPR
- Assert(aenmNames[iReg] == WHvX64RegisterCr8);
- APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
- iReg++;
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ {
+     Assert(aenmNames[iReg] == WHvX64RegisterCr8);
+     APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
+     iReg++;
+ }

  /* Debug registers. */
…

  #endif /* !IN_RING0 */
+
+
+ /**
+  * Interface for importing state on demand (used by IEM).
+  *
+  * @returns VBox status code.
+  * @param   pVCpu   The cross context CPU structure.
+  * @param   pCtx    The target CPU context.
+  * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
+  */
+ VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
+ {
+     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
+
+ #ifdef IN_RING0
+     /** @todo improve and secure this translation */
+     PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
+     AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
+     VMCPUID idCpu = pVCpu->idCpu;
+     ASMCompilerBarrier();
+     AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
+
+     return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], pCtx, fWhat);
+ #else
+     return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
+ #endif
+ }
…
  #else
  RT_NOREF(pGVCpu, pszCaller);
- int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
+ int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
  AssertRCReturn(rc, rc);
  #endif
…
  VBOXSTRICTRC rcStrict;
  # ifdef IN_RING0
- rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MemExit");
+ rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx,
+                                      NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
  if (rcStrict != VINF_SUCCESS)
      return rcStrict;
  # else
- rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
+ rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
  AssertRCReturn(rc, rc);
  NOREF(pGVCpu);
…
  */
  nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
- rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
+ rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
  AssertRCReturn(rc, rc);
…
  }
  #endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
+

  #ifdef NEM_WIN_USE_OUR_OWN_RUN_API
…
   * If we get down here, we're supposed to #GP(0).
   */
- rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MSR");
+ rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
+                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
  if (rcStrict == VINF_SUCCESS)
  {
…
   * If we get down here, we're supposed to #GP(0).
   */
- rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MSR");
+ rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
+                                              NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
  if (rcStrict == VINF_SUCCESS)
  {
…
  pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
  nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
- VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "Xcpt");
+ uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
+ if (pMsg->ExceptionVector == X86_XCPT_DB)
+     fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
+ VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, fWhat, "Xcpt");
  if (rcStrict != VINF_SUCCESS)
      return rcStrict;
…
  pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
  nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
- VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "Xcpt");
+ uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
+ if (pMsg->ExceptionVector == X86_XCPT_DB)
+     fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
+ VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
  if (rcStrict != VINF_SUCCESS)
      return rcStrict;
…
  }
  else
+ {
+     STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
      Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
            VBOXSTRICTRC_VAL(rcStrict) ));
+ }
  return rcStrict;
  }
…

  /*
-  * First update APIC.
+  * First update APIC. We ASSUME this won't need TPR/CR8.
   */
  if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
…
  if (pCtx->fExtrn & fNeedExtrn)
  {
- VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IntFF");
+ VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
+                                                           NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
  if (rcStrict != VINF_SUCCESS)
      return rcStrict;
…
  && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
  {
- VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI");
+ VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
+                                                           NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
  if (rcStrict == VINF_SUCCESS)
  {
…
  && pCtx->rflags.Bits.u1IF)
  {
- VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI");
+ AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
+ VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
+                                                           NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
  if (rcStrict == VINF_SUCCESS)
  {
…
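The template changes above make CR8/TPR (and the MSRs) conditional on the requested CPUMCTX_EXTRN_XXX bits instead of being fetched unconditionally. The batching pattern they gate looks roughly as follows; a reduced sketch assuming the WinHvPlatform API (WHvGetVirtualProcessorRegisters and the WHvX64Register* names are the real Windows API, but the fragment is not the changeset's code):

    /* Reduced sketch of the ring-3 path: append only the names whose
     * CPUMCTX_EXTRN_XXX bit was requested, then fetch them in one call. */
    WHV_REGISTER_NAME  aenmNames[64];
    WHV_REGISTER_VALUE aValues[64];
    uint32_t           iReg = 0;
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;   /* no longer unconditional */
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu,
                                                  aenmNames, iReg, aValues);
    /* ...on success, unpack in the same order (e.g. APICSetTpr() for CR8)
     * and clear the corresponding bits in pCtx->fExtrn. */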
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72462 → r72484)

…
  in the VT-x part of the sources instead of the generic stuff. */
  int rc;
- if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
+ PVM pVM = pVCpu->CTX_SUFF(pVM);
+ if (   pVM->hm.s.vmx.fSupported
+     && VM_IS_HM_ENABLED(pVM))
      rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
  else
…
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (r72412 → r72484)

…
  }
  }
- /** @todo CR8/TPR */
- HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
- pInput->Elements[iReg].Name = HvX64RegisterCr8;
- pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
- iReg++;
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ {
+     HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
+     pInput->Elements[iReg].Name = HvX64RegisterCr8;
+     pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
+     iReg++;
+ }

  /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
…
  pInput->Names[iReg++] = HvX64RegisterCr4;
  }
- pInput->Names[iReg++] = HvX64RegisterCr8; /// @todo CR8/TPR
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+     pInput->Names[iReg++] = HvX64RegisterCr8;

  /* Debug registers. */
…
  }
  }
-
- /// @todo CR8/TPR
- Assert(pInput->Names[iReg] == HvX64RegisterCr8);
- APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
- iReg++;
+ if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
+ {
+     Assert(pInput->Names[iReg] == HvX64RegisterCr8);
+     APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
+     iReg++;
+ }

  /* Debug registers. */
…
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (r72475 → r72484)

…
  STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops",              "/NEM/CPU%u/StopCpuSuccess", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPending,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of pending CPU stops",                 "/NEM/CPU%u/StopCpuPending", iCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuPendingOdd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of odd pending CPU stops (see code)",  "/NEM/CPU%u/StopCpuPendingOdd", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatCancelChangedState, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel changed state",             "/NEM/CPU%u/CancelChangedState", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatCancelAlertedThread, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel alerted EMT",              "/NEM/CPU%u/CancelAlertedEMT", iCpu);
…
  STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnCancel,     STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of cancel execution breaks",           "/NEM/CPU%u/BreakOnCancel", iCpu);
  STAMR3RegisterF(pVM, &pNemCpu->StatBreakOnStatus,     STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of status code breaks",                "/NEM/CPU%u/BreakOnStatus", iCpu);
+ STAMR3RegisterF(pVM, &pNemCpu->StatImportOnDemand,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of on-demand state imports",           "/NEM/CPU%u/ImportOnDemand", iCpu);
  }
…
trunk/src/VBox/VMM/include/IEMInternal.h (r69111 → r72484)

…
  #endif

+ /** @def IEM_CTX_ASSERT
+  * Asserts that the state specified by @a a_fExtrnMbz is present in the CPU context.
+  * @param a_pCtx        The CPUMCTX structure.
+  * @param a_fExtrnMbz   The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
+  */
+ #define IEM_CTX_ASSERT(a_pCtx, a_fExtrnMbz)     Assert(!((a_pCtx)->fExtrn & (a_fExtrnMbz)))
+
+ /** @def IEM_CTX_IMPORT_RET
+  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+  *
+  * Will call the keeper to import the bits as needed.
+  *
+  * Returns on import failure.
+  *
+  * @param a_pVCpu           The cross context virtual CPU structure.
+  * @param a_pCtx            The CPUMCTX structure.
+  * @param a_fExtrnImport    The mask of CPUMCTX_EXTRN_XXX flags to import.
+  */
+ #define IEM_CTX_IMPORT_RET(a_pVCpu, a_pCtx, a_fExtrnImport) \
+     if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+     { /* likely */ } \
+     else do { \
+         int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
+         AssertRCReturn(rcCtxImport, rcCtxImport); \
+     } while (0)
+
+ /** @def IEM_CTX_IMPORT_NORET
+  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+  *
+  * Will call the keeper to import the bits as needed.
+  *
+  * @param a_pVCpu           The cross context virtual CPU structure.
+  * @param a_pCtx            The CPUMCTX structure.
+  * @param a_fExtrnImport    The mask of CPUMCTX_EXTRN_XXX flags to import.
+  */
+ #define IEM_CTX_IMPORT_NORET(a_pVCpu, a_pCtx, a_fExtrnImport) \
+     if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+     { /* likely */ } \
+     else do { \
+         int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
+         AssertLogRelRC(rcCtxImport); \
+     } while (0)
+
+ /** @def IEM_CTX_IMPORT_JMP
+  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
+  *
+  * Will call the keeper to import the bits as needed.
+  *
+  * Jumps on import failure.
+  *
+  * @param a_pVCpu           The cross context virtual CPU structure.
+  * @param a_pCtx            The CPUMCTX structure.
+  * @param a_fExtrnImport    The mask of CPUMCTX_EXTRN_XXX flags to import.
+  */
+ #define IEM_CTX_IMPORT_JMP(a_pVCpu, a_pCtx, a_fExtrnImport) \
+     if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+     { /* likely */ } \
+     else do { \
+         int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
+         AssertRCStmt(rcCtxImport, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), rcCtxImport)); \
+     } while (0)
+
+ int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport);
+
+
  /** Gets the current IEMTARGETCPU value.
   * @returns IEMTARGETCPU value.
…
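A hedged usage sketch of the RET flavour — the instruction helper below is hypothetical (not part of the commit), while the macro, mask, and the IEM_CIMPL_DEF_1/iemRegAddToRipAndClearRF plumbing are real:

    /* Hypothetical 'str'-style helper: TR may still live in the hypervisor,
     * so it must be imported before reading.  IEM_CTX_IMPORT_RET bails out
     * with the keeper's status code if the import fails. */
    IEM_CIMPL_DEF_1(exampleCImpl_str, uint16_t *, pu16Dst)
    {
        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
        IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_TR); /* import TR on demand */
        *pu16Dst = pCtx->tr.Sel;                           /* now safe to read   */
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }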
trunk/src/VBox/VMM/include/NEMInternal.h (r72470 → r72484)

…
  /** The CPUMCTX_EXTRN_XXX mask for IEM. */
- # define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM       (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI)
+ # define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM       (  IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT \
+                                                    | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI )
+ /** The CPUMCTX_EXTRN_XXX mask for IEM when raising exceptions. */
+ # define NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT  (IEM_CPUMCTX_EXTRN_XCPT_MASK | NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM)

  /** @name Windows: Interrupt window flags (NEM_WIN_INTW_F_XXX).
…
  STAMCOUNTER StatStopCpuSuccess;
  STAMCOUNTER StatStopCpuPending;
+ STAMCOUNTER StatStopCpuPendingOdd;
  STAMCOUNTER StatCancelChangedState;
  STAMCOUNTER StatCancelAlertedThread;
…
  STAMCOUNTER StatBreakOnFFPost;
  STAMCOUNTER StatBreakOnStatus;
+ STAMCOUNTER StatImportOnDemand;
  /** @} */
  #endif /* RT_OS_WINDOWS */