Changeset 72497 in vbox for trunk/src/VBox/VMM/VMMAll

Timestamp: Jun 10, 2018 5:33:31 PM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited
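All three files get the same mechanical rewrite: guest-register accesses spelled IEM_GET_CTX(pVCpu)-> become direct pVCpu->cpum.GstCtx. accesses, and locals that merely cached the context pointer (PCPUMCTX pCtx = IEM_GET_CTX(pVCpu)) are dropped. The sketch below is a minimal, compilable model of why the two spellings are interchangeable; MYCPUMCTX, MYVMCPU and MY_GET_CTX are invented stand-ins (the real CPUMCTX/VMCPU types are far larger), and it assumes IEM_GET_CTX(pVCpu) resolves to &pVCpu->cpum.GstCtx, which is what the one-for-one substitutions throughout this diff imply.

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for CPUMCTX/VMCPU, for illustration only. */
    typedef struct MYCPUMCTX { uint64_t cr0, rsp; } MYCPUMCTX;
    typedef struct MYVMCPU   { struct { MYCPUMCTX GstCtx; } cpum; } MYVMCPU;

    /* Old-style accessor macro, assumed to expand to the embedded context. */
    #define MY_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)

    int main(void)
    {
        MYVMCPU  VCpu  = { { { 0, 0 } } };
        MYVMCPU *pVCpu = &VCpu;

        MY_GET_CTX(pVCpu)->cr0 = 0x11;  /* before r72497: through the macro    */
        pVCpu->cpum.GstCtx.rsp = 0x22;  /* after r72497: direct member access  */

        /* Both spellings name the same storage, so the rewrite preserves
           behaviour as long as the macro expands as assumed above. */
        printf("cr0=%#llx rsp=%#llx\n",
               (unsigned long long)pVCpu->cpum.GstCtx.cr0,
               (unsigned long long)MY_GET_CTX(pVCpu)->rsp);
        return 0;
    }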
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r72496 → r72497)

@@ 3240 @@
     Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
-          uCurVector, IEM_GET_CTX(pVCpu)->cr2));
+          uCurVector, pVCpu->cpum.GstCtx.cr2));

@@ 5547 @@
     /** @todo set/clear RF. */
-    IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
+    pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);

@@ 5584 @@
     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
-                             IEM_GET_CTX(pVCpu)->tr.Sel, 0);
+                             pVCpu->cpum.GstCtx.tr.Sel, 0);

@@ 5751 @@
     if (   (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
-        && (   (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
-            && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
+        && (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
+            && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
         uErr |= X86_TRAP_PF_ID;

@@ 5960 @@
 #else
-    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
+    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
 #endif

@@ 6235 @@
     Assert(iReg < 16);
-    return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
+    return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
(The u32 and u64 fetchers at lines 6249 and 6263 change identically.)

@@ 7313 @@
     Assert(iStReg < 8);
-    PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
+    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
     uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
(The same pFpuCtx initialisation changes in iemFpuStackIncTop (7326), iemFpuStackDecTop (7343), iemFpuStRegNotEmpty (7630), iemFpuStRegNotEmptyRef (7640), iemFpu2StRegsNotEmptyRef (7654) and iemFpu2StRegsNotEmptyRefFirst (7670).)

@@ 8014 @@
     && (   (pVCpu->iem.s.uCpl == 3
             && !(fAccess & IEM_ACCESS_WHAT_SYS))
-        || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
+        || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
 {
     Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));

@@ 8034 @@
     if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
         && (fFlags & X86_PTE_PAE_NX)
-        && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
+        && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
 {
     Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));

@@ 9430 @@
     if (   (GCPtrMem & 15)
-        && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+        && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
         return iemRaiseGeneralProtectionFault0(pVCpu);

@@ 9463 @@
     if (   (GCPtrMem & 15) == 0
-        || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
+        || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
     {
         PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
(The same two alignment checks recur in the store path at lines 9906 and 9937, there mapping pu128Dst with IEM_ACCESS_DATA_W, and change identically.)

@@ 10544 @@
     VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
     if (rcStrict == VINF_SUCCESS)
-        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
+        pVCpu->cpum.GstCtx.rsp = uNewRsp;
     return rcStrict;

@@ 10923 @@
 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
-        if (IEM_GET_CTX(pVCpu)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
+        if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
 #define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
-        if ((IEM_GET_CTX(pVCpu)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
+        if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
 #define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
-        if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
+        if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
 #define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
-        if (   (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
-            || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
+        if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
+            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
-        if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
+        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
(IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT (10947) changes the same way; the SSE41/SSE3/SSE2/SSE variants (10956, 10965, 10974, 10983) switch their cr0 & X86_CR0_EM and cr4 & X86_CR4_OSFXSR tests over, the MMX variants (10992, 11000) their cr0 & X86_CR0_EM test, and every variant its trailing cr0 & X86_CR0_TS test.)

@@ 11021 @@
         || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
-        || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
+        || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
         return iemRaiseUndefinedOpcode(pVCpu); \

@@ 11041 @@
 #define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
-    do { IEM_GET_CTX(pVCpu)->eflags.u = (a_EFlags); Assert(IEM_GET_CTX(pVCpu)->eflags.u & X86_EFL_1); } while (0)
+    do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)

@@ 11084 @@
-#define IEM_MC_FETCH_CR0_U16(a_u16Dst)  (a_u16Dst) = (uint16_t)IEM_GET_CTX(pVCpu)->cr0
-#define IEM_MC_FETCH_CR0_U32(a_u32Dst)  (a_u32Dst) = (uint32_t)IEM_GET_CTX(pVCpu)->cr0
-#define IEM_MC_FETCH_CR0_U64(a_u64Dst)  (a_u64Dst) = IEM_GET_CTX(pVCpu)->cr0
+#define IEM_MC_FETCH_CR0_U16(a_u16Dst)  (a_u16Dst) = (uint16_t)pVCpu->cpum.GstCtx.cr0
+#define IEM_MC_FETCH_CR0_U32(a_u32Dst)  (a_u32Dst) = (uint32_t)pVCpu->cpum.GstCtx.cr0
+#define IEM_MC_FETCH_CR0_U64(a_u64Dst)  (a_u64Dst) = pVCpu->cpum.GstCtx.cr0
 /** @todo IEM_MC_FETCH_LDTR_U16, IEM_MC_FETCH_LDTR_U32, IEM_MC_FETCH_LDTR_U64, IEM_MC_FETCH_TR_U16, IEM_MC_FETCH_TR_U32, and IEM_MC_FETCH_TR_U64 aren't worth it... */

@@ 11090 @@
 #define IEM_MC_FETCH_LDTR_U16(a_u16Dst) do { \
     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_LDTR); \
-    (a_u16Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \
+    (a_u16Dst) = pVCpu->cpum.GstCtx.ldtr.Sel; \
 } while (0)
(The U32/U64 LDTR fetchers (11094, 11098) and the three TR fetchers (11102, 11106, 11110, reading tr.Sel after importing CPUMCTX_EXTRN_TR) change identically.)

@@ 11113 @@
 /** @note Not for IOPL or IF testing or modification. */
-#define IEM_MC_FETCH_EFLAGS(a_EFlags)    (a_EFlags) = IEM_GET_CTX(pVCpu)->eflags.u
-#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)IEM_GET_CTX(pVCpu)->eflags.u
-#define IEM_MC_FETCH_FSW(a_u16Fsw)       (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
-#define IEM_MC_FETCH_FCW(a_u16Fcw)       (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
+#define IEM_MC_FETCH_EFLAGS(a_EFlags)    (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
+#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
+#define IEM_MC_FETCH_FSW(a_u16Fsw)       (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
+#define IEM_MC_FETCH_FCW(a_u16Fcw)       (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW

@@ 11138 @@
 #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
-    do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
+    do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)

@@ 11148 @@
 /** @note Not for IOPL or IF testing or modification. */
-#define IEM_MC_REF_EFLAGS(a_pEFlags)     (a_pEFlags) = &IEM_GET_CTX(pVCpu)->eflags.u
+#define IEM_MC_REF_EFLAGS(a_pEFlags)     (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u

@@ 11226 @@
 /** @note Not for IOPL or IF modification. */
-#define IEM_MC_SET_EFL_BIT(a_fBit)   do { IEM_GET_CTX(pVCpu)->eflags.u |= (a_fBit); } while (0)
-#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { IEM_GET_CTX(pVCpu)->eflags.u &= ~(a_fBit); } while (0)
-#define IEM_MC_FLIP_EFL_BIT(a_fBit)  do { IEM_GET_CTX(pVCpu)->eflags.u ^= (a_fBit); } while (0)
-#define IEM_MC_CLEAR_FSW_EX()        do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
+#define IEM_MC_SET_EFL_BIT(a_fBit)   do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
+#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
+#define IEM_MC_FLIP_EFL_BIT(a_fBit)  do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
+#define IEM_MC_CLEAR_FSW_EX()        do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)

@@ 11236 @@
 /** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
 #define IEM_MC_FPU_TO_MMX_MODE() do { \
-    IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
-    IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
+    pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
+    pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
 } while (0)
 /** Switches the FPU state from MMX mode (FTW=0xffff). */
 #define IEM_MC_FPU_FROM_MMX_MODE() do { \
-    IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
+    pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
 } while (0)

@@ 11246 @@
 #define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
-    do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
+    do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
(IEM_MC_FETCH_MREG_U32 (11248), IEM_MC_STORE_MREG_U64 and _U32_ZX_U64 (11250, 11254, two lines each, including the au32[2] = 0xffff write), and IEM_MC_REF_MREG_U64/_U64_CONST/_U32_CONST (11258, 11260, 11262) switch the same aRegs[] accesses over.)

@@ 11265 @@
 #define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
-    do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
-         (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
+    do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
+         (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
       } while (0)
(IEM_MC_FETCH_XREG_U64/U32/HI_U64 (11269, 11271, 11273), IEM_MC_STORE_XREG_U128/U64/U64_ZX_U128/U32/U32_ZX_U128/HI_U64 (11275 through 11291), IEM_MC_REF_XREG_U128/_U128_CONST/_U64_CONST (11293, 11295, 11297) and IEM_MC_COPY_XREG_U128 (11299) rewrite every aXMM[] access the same way.)

@@ 11306 @@
 #define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
-    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+    do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
(The identical pXStateTmp line changes in IEM_MC_FETCH_YREG_U64/U128/U256 (11311, 11316, 11322), IEM_MC_STORE_YREG_U32/U64/U128/U256_ZX_VLMAX (11332, 11342, 11351, 11360), IEM_MC_CLEAR_YREG_128_UP (11376), IEM_MC_COPY_YREG_U256/U128/U64_ZX_VLMAX (11384, 11394, 11404) and IEM_MC_MERGE_YREG_U32_U96/U64_U64/U64HI_U64/U64LOCAL_U64_ZX_VLMAX (11415, 11427, 11438, 11449).)

@@ 11370 @@
 #define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
-    (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
+    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
(IEM_MC_REF_YREG_U128_CONST (11372) and IEM_MC_REF_YREG_U64_CONST (11374) change identically.)

@@ 11804 @@
     if (   !(a_u16FSW & X86_FSW_ES) \
         || !(   (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
-             & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
+             & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \

@@ 11944 @@
 #define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
-    a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
+    a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
(The two- and three-argument FPU call wrappers (11956, 11969), and the wrappers invoked after IEM_MC_PREPARE_FPU_USAGE() (12111, 12125) and after IEM_MC_PREPARE_SSE_USAGE() (12139, 12153), change the same first argument.)

@@ 12160 @@
 #define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
-    IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState), 0)
+    IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)

@@ 12194 @@
 /** @note Not for IOPL or IF testing. */
-#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
+#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
(The whole conditional family follows suit: IEM_MC_IF_EFL_BIT_NOT_SET/ANY_BITS_SET/NO_BITS_SET (12196, 12198, 12200), IEM_MC_IF_EFL_BITS_NE/EQ (12203, 12207), IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE and _BIT_NOT_SET_AND_BITS_EQ (12211, 12216), IEM_MC_IF_CX/ECX/RCX_IS_NZ (12219, 12220, 12221), the six IEM_MC_IF_CX/ECX/RCX_IS_NZ_AND_EFL_BIT_SET/_NOT_SET combinations (12224 through 12245), and IEM_MC_IF_FCW_IM (12260), which now tests pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM.)

@@ 12281 @@
     IEMOP_INC_STATS(a_Stats); \
-    Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
+    Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
           pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
 } while (0)

@@ 12501 @@
 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
 { \
-    Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
-          IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
+    Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
     pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
     pVCpu->iem.s.uRexB = 0; \

@@ 13782 @@
     && rcStrict == VINF_SUCCESS
     && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-    && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
+    && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
 {
     rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);

@@ 13825 @@
     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
     return rcStrict;

@@ 13892 @@
 VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
+    AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
     uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
(The same two-line pattern collapses into a single AssertReturn in the three sibling entry points at lines 13916, 13952 (IEMExecOneBypassEx) and 13976.)

@@ 14186 @@
     * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
     */
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
 }
 else
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r72496 → r72497)

@@ 4491 @@
     /* commit */
-    IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fAccessible;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);

@@ 4597 @@
     /* commit flags value and advance rip. */
-    IEM_GET_CTX(pVCpu)->eflags.Bits.u1ZF = fDescOk;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);

@@ 4632 @@
     if (pVCpu->iem.s.uCpl != 0)
         return iemRaiseGeneralProtectionFault0(pVCpu);
-    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
+    Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
(The same Assert changes at line 4705, guarded by SVM_CTRL_INTERCEPT_IDTR_WRITES, and at line 5502.)

@@ 5763 @@
-    Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM);
+    Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);

@@ 5837 @@
     RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
     uint8_t const       uPcid        = uDesc.s.Lo & UINT64_C(0xfff);
-    uint32_t const      uCr4         = IEM_GET_CTX(pVCpu)->cr4;
-    uint64_t const      uCr3         = IEM_GET_CTX(pVCpu)->cr3;
+    uint32_t const      uCr4         = pVCpu->cpum.GstCtx.cr4;
+    uint64_t const      uCr3         = pVCpu->cpum.GstCtx.cr3;
     switch (uInvpcidType)

@@ 6223 @@
 IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
 {
-    return IEM_CIMPL_CALL_2(iemCImpl_in, IEM_GET_CTX(pVCpu)->dx, cbReg);
+    return IEM_CIMPL_CALL_2(iemCImpl_in, pVCpu->cpum.GstCtx.dx, cbReg);
 }

@@ 6315 @@
 IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
 {
-    return IEM_CIMPL_CALL_2(iemCImpl_out, IEM_GET_CTX(pVCpu)->dx, cbReg);
+    return IEM_CIMPL_CALL_2(iemCImpl_out, pVCpu->cpum.GstCtx.dx, cbReg);
 }
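A detail worth noting before the last file: the IEM_MC_* microcode macros rewritten in IEMAll.cpp above, and the IEM_CIMPL_DEF_* function bodies in this file, never receive the CPU state explicitly. They rely on a variable named pVCpu being in scope at the expansion site (IEM_CIMPL_DEF_1 presumably declares that parameter), which is why the switch to pVCpu->cpum.GstCtx needs no signature changes anywhere. A toy model of the idiom, with all names invented (not the real IEM types or helpers):

    #include <stdint.h>

    /* Invented stand-in for VMCPU, for illustration only. */
    typedef struct MYVMCPU { struct { struct { uint64_t cr0; } GstCtx; } cpum; } MYVMCPU;

    #define MY_CR0_EM 0x04u
    #define MY_CR0_TS 0x08u

    static int myRaiseDeviceNotAvailable(MYVMCPU *pVCpu) { (void)pVCpu; return -1; }

    /* Mirrors the post-change macro shape: it silently uses the pVCpu
       variable that exists wherever the macro is expanded. */
    #define MY_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
        do { \
            if (pVCpu->cpum.GstCtx.cr0 & (MY_CR0_EM | MY_CR0_TS)) \
                return myRaiseDeviceNotAvailable(pVCpu); \
        } while (0)

    static int myEmulateFpuInsn(MYVMCPU *pVCpu)
    {
        MY_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); /* may return early */
        return 0;                                 /* ...emulation would go here */
    }

    int main(void)
    {
        MYVMCPU VCpu = { { { 0 } } };
        return myEmulateFpuInsn(&VCpu);
    }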
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r72496 r72497 153 153 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 154 154 { 155 PVM pVM = pVCpu->CTX_SUFF(pVM); 156 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 155 PVM pVM = pVCpu->CTX_SUFF(pVM); 157 156 158 157 /* 159 158 * Setup. 160 159 */ 161 ADDR_TYPE uCounterReg = p Ctx->ADDR_rCX;160 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX; 162 161 if (uCounterReg == 0) 163 162 { … … 175 174 176 175 uint64_t uSrc2Base; 177 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &p Ctx->es), X86_SREG_ES, &uSrc2Base);176 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base); 178 177 if (rcStrict != VINF_SUCCESS) 179 178 return rcStrict; 180 179 181 int8_t const cbIncr = p Ctx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);182 ADDR_TYPE uSrc1AddrReg = p Ctx->ADDR_rSI;183 ADDR_TYPE uSrc2AddrReg = p Ctx->ADDR_rDI;184 uint32_t uEFlags = p Ctx->eflags.u;180 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 181 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI; 182 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI; 183 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u; 185 184 186 185 /* … … 205 204 || ( uSrc1AddrReg < pSrc1Hid->u32Limit 206 205 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit 207 && uSrc2AddrReg < p Ctx->es.u32Limit208 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= p Ctx->es.u32Limit)206 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit 207 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit) 209 208 ) 210 209 ) … … 259 258 260 259 /* Update the registers before looping. */ 261 p Ctx->ADDR_rCX = uCounterReg;262 p Ctx->ADDR_rSI = uSrc1AddrReg;263 p Ctx->ADDR_rDI = uSrc2AddrReg;264 p Ctx->eflags.u = uEFlags;260 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg; 261 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg; 262 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg; 263 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 265 264 266 265 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem); … … 293 292 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags); 294 293 295 p Ctx->ADDR_rSI = uSrc1AddrReg += cbIncr;296 p Ctx->ADDR_rDI = uSrc2AddrReg += cbIncr;297 p Ctx->ADDR_rCX = --uCounterReg;298 p Ctx->eflags.u = uEFlags;294 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr; 295 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr; 296 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg; 297 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 299 298 cLeftPage--; 300 299 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); … … 324 323 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 325 324 { 326 PVM pVM = pVCpu->CTX_SUFF(pVM); 327 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 325 PVM pVM = pVCpu->CTX_SUFF(pVM); 328 326 329 327 /* 330 328 * Setup. 331 329 */ 332 ADDR_TYPE uCounterReg = p Ctx->ADDR_rCX;330 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX; 333 331 if (uCounterReg == 0) 334 332 { … … 346 344 347 345 uint64_t uSrc2Base; 348 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &p Ctx->es), X86_SREG_ES, &uSrc2Base);346 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base); 349 347 if (rcStrict != VINF_SUCCESS) 350 348 return rcStrict; 351 349 352 int8_t const cbIncr = p Ctx->eflags.Bits.u1DF ? 
-(OP_SIZE / 8) : (OP_SIZE / 8);353 ADDR_TYPE uSrc1AddrReg = p Ctx->ADDR_rSI;354 ADDR_TYPE uSrc2AddrReg = p Ctx->ADDR_rDI;355 uint32_t uEFlags = p Ctx->eflags.u;350 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 351 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI; 352 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI; 353 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u; 356 354 357 355 /* … … 376 374 || ( uSrc1AddrReg < pSrc1Hid->u32Limit 377 375 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit 378 && uSrc2AddrReg < p Ctx->es.u32Limit379 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= p Ctx->es.u32Limit)376 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit 377 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit) 380 378 ) 381 379 ) … … 430 428 431 429 /* Update the registers before looping. */ 432 p Ctx->ADDR_rCX = uCounterReg;433 p Ctx->ADDR_rSI = uSrc1AddrReg;434 p Ctx->ADDR_rDI = uSrc2AddrReg;435 p Ctx->eflags.u = uEFlags;430 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg; 431 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg; 432 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg; 433 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 436 434 437 435 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem); … … 464 462 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags); 465 463 466 p Ctx->ADDR_rSI = uSrc1AddrReg += cbIncr;467 p Ctx->ADDR_rDI = uSrc2AddrReg += cbIncr;468 p Ctx->ADDR_rCX = --uCounterReg;469 p Ctx->eflags.u = uEFlags;464 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr; 465 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr; 466 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg; 467 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 470 468 cLeftPage--; 471 469 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF)); … … 495 493 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) 496 494 { 497 PVM pVM = pVCpu->CTX_SUFF(pVM); 498 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 495 PVM pVM = pVCpu->CTX_SUFF(pVM); 499 496 500 497 /* 501 498 * Setup. 502 499 */ 503 ADDR_TYPE uCounterReg = p Ctx->ADDR_rCX;500 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX; 504 501 if (uCounterReg == 0) 505 502 { … … 510 507 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES); 511 508 uint64_t uBaseAddr; 512 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &p Ctx->es), X86_SREG_ES, &uBaseAddr);509 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr); 513 510 if (rcStrict != VINF_SUCCESS) 514 511 return rcStrict; 515 512 516 int8_t const cbIncr = p Ctx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);517 OP_TYPE const uValueReg = p Ctx->OP_rAX;518 ADDR_TYPE uAddrReg = p Ctx->ADDR_rDI;519 uint32_t uEFlags = p Ctx->eflags.u;513 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 514 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX; 515 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI; 516 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u; 520 517 521 518 /* … … 534 531 && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ 535 532 && ( IS_64_BIT_CODE(pVCpu) 536 || ( uAddrReg < p Ctx->es.u32Limit537 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= p Ctx->es.u32Limit)533 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit 534 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit) 538 535 ) 539 536 ) … … 565 562 /* Update the regs. */ 566 563 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 567 p Ctx->ADDR_rCX = uCounterReg -= i;568 p Ctx->ADDR_rDI = uAddrReg += i * cbIncr;569 p Ctx->eflags.u = uEFlags;564 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i; 565 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr; 566 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 570 567 Assert(!(uEFlags & X86_EFL_ZF) == fQuit); 571 568 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); … … 598 595 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags); 599 596 600 p Ctx->ADDR_rDI = uAddrReg += cbIncr;601 p Ctx->ADDR_rCX = --uCounterReg;602 p Ctx->eflags.u = uEFlags;597 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr; 598 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg; 599 pVCpu->cpum.GstCtx.eflags.u = uEFlags; 603 600 cLeftPage--; 604 601 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); … … 628 625 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) 629 626 { 630 PVM pVM = pVCpu->CTX_SUFF(pVM); 631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 627 PVM pVM = pVCpu->CTX_SUFF(pVM); 632 628 633 629 /* 634 630 * Setup. 635 631 */ 636 ADDR_TYPE uCounterReg = p Ctx->ADDR_rCX;632 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX; 637 633 if (uCounterReg == 0) 638 634 { … … 643 639 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES); 644 640 uint64_t uBaseAddr; 645 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &p Ctx->es), X86_SREG_ES, &uBaseAddr);641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr); 646 642 if (rcStrict != VINF_SUCCESS) 647 643 return rcStrict; 648 644 649 int8_t const cbIncr = p Ctx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);650 OP_TYPE const uValueReg = p Ctx->OP_rAX;651 ADDR_TYPE uAddrReg = p Ctx->ADDR_rDI;652 uint32_t uEFlags = p Ctx->eflags.u;645 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8); 646 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX; 647 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI; 648 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u; 653 649 654 650 /* … … 667 663 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 668 664 && ( IS_64_BIT_CODE(pVCpu) 669 || ( uAddrReg < p Ctx->es.u32Limit670 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= p Ctx->es.u32Limit)665 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit 666 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit) 671 667 ) 672 668 ) … … 698 694 /* Update the regs. 
                 /* Update the regs. */
                 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
-                pCtx->ADDR_rCX = uCounterReg -= i;
-                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
-                pCtx->eflags.u = uEFlags;
+                pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
+                pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
+                pVCpu->cpum.GstCtx.eflags.u = uEFlags;
                 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
                 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
…
                 return rcStrict;
             RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
-            pCtx->ADDR_rDI = uAddrReg += cbIncr;
-            pCtx->ADDR_rCX = --uCounterReg;
-            pCtx->eflags.u = uEFlags;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
+            pVCpu->cpum.GstCtx.eflags.u = uEFlags;
             cLeftPage--;
             IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
…
 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
 {
-    PVM         pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX    pCtx = IEM_GET_CTX(pVCpu);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*
      * Setup.
      */
-    ADDR_TYPE       uCounterReg = pCtx->ADDR_rCX;
+    ADDR_TYPE       uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
     if (uCounterReg == 0)
     {
…
 
     uint64_t        uDstBase;
-    rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uDstBase);
+    rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
 
-    int8_t const    cbIncr      = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
-    ADDR_TYPE       uSrcAddrReg = pCtx->ADDR_rSI;
-    ADDR_TYPE       uDstAddrReg = pCtx->ADDR_rDI;
+    int8_t const    cbIncr      = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
+    ADDR_TYPE       uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
+    ADDR_TYPE       uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
 
     /*
…
                 || (   uSrcAddrReg < pSrcHid->u32Limit
                     && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
-                    && uDstAddrReg < pCtx->es.u32Limit
-                    && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+                    && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
+                    && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
                )
            )
…
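The precondition blocks above (for MOVS here, and equally for the SCAS/STOS variants) gate a per-page fast path on how many whole elements remain before the next page boundary, the cLeftPage count that keeps reappearing in these hunks. A sketch of that computation under the usual 4 KiB-page assumption (macro and function names are ours):

    #include <stdint.h>

    #define MY_PAGE_SIZE         4096u
    #define MY_PAGE_OFFSET_MASK  (MY_PAGE_SIZE - 1u)

    /* Elements that fit between uVirtAddr and the end of its page. */
    static uint32_t elems_left_on_page(uint64_t uVirtAddr, unsigned cbElem)
    {
        return (uint32_t)((MY_PAGE_SIZE - (uVirtAddr & MY_PAGE_OFFSET_MASK)) / cbElem);
    }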
 
             /* Update the registers. */
-            pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
-            pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
-            pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
+            pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
 
             iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
…
             if (uCounterReg == 0)
                 break;
-            IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+            IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
             continue;
         }
…
                 return rcStrict;
 
-            pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
-            pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
-            pCtx->ADDR_rCX = --uCounterReg;
+            pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
             cLeftPage--;
             IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
…
         if (uCounterReg == 0)
             break;
-        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
     }
 
…
 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
 {
-    PVM         pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX    pCtx = IEM_GET_CTX(pVCpu);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*
      * Setup.
      */
-    ADDR_TYPE       uCounterReg = pCtx->ADDR_rCX;
+    ADDR_TYPE       uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
     if (uCounterReg == 0)
     {
…
 
     uint64_t        uBaseAddr;
-    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
+    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
 
-    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
-    OP_TYPE const   uValue   = pCtx->OP_rAX;
-    ADDR_TYPE       uAddrReg = pCtx->ADDR_rDI;
+    int8_t const    cbIncr   = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
+    OP_TYPE const   uValue   = pVCpu->cpum.GstCtx.OP_rAX;
+    ADDR_TYPE       uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
 
     /*
…
             && cbIncr > 0 /** @todo Implement reverse direction string ops. */
             && (   IS_64_BIT_CODE(pVCpu)
-                || (   uAddrReg < pCtx->es.u32Limit
-                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+                || (   uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
                )
            )
…
         {
             /* Update the regs first so we can loop on cLeftPage. */
-            pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
-            pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
 
             /* Do the memsetting. */
…
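Note the comment in the STOS hunk just above: rCX and rDI are committed before the page-sized store run, so the architectural register state is already correct if the loop yields or returns afterwards. Roughly, for the byte variant with DF clear (illustrative only; the real code stores through a locked guest-page mapping, not a host buffer):

    #include <stdint.h>
    #include <string.h>

    /* STOSB-style page fill: counter and pointer move by the same byte count. */
    static void stos_fill_page(uint8_t *pbDst, uint8_t bFill, uint32_t cbRun,
                               uint64_t *puRcx, uint64_t *puRdi)
    {
        *puRcx -= cbRun;             /* commit architectural state first */
        *puRdi += cbRun;
        memset(pbDst, bFill, cbRun); /* then do the actual fill */
    }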
             if (!(uVirtAddr & (OP_SIZE - 1)))
             {
-                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
                 continue;
             }
…
             if (rcStrict != VINF_SUCCESS)
                 return rcStrict;
-            pCtx->ADDR_rDI = uAddrReg += cbIncr;
-            pCtx->ADDR_rCX = --uCounterReg;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
             cLeftPage--;
             IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
…
         if (uCounterReg == 0)
             break;
-        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
     }
 
…
 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
 {
-    PVM         pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX    pCtx = IEM_GET_CTX(pVCpu);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*
      * Setup.
      */
-    ADDR_TYPE       uCounterReg = pCtx->ADDR_rCX;
+    ADDR_TYPE       uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
     if (uCounterReg == 0)
     {
…
         return rcStrict;
 
-    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
-    ADDR_TYPE       uAddrReg = pCtx->ADDR_rSI;
+    int8_t const    cbIncr   = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
+    ADDR_TYPE       uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
 
     /*
…
                 /* Only get the last byte, the rest doesn't matter in direct access mode. */
 #if OP_SIZE == 32
-                pCtx->rax = puMem[cLeftPage - 1];
+                pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
 #else
-                pCtx->OP_rAX = puMem[cLeftPage - 1];
+                pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
 #endif
-                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
-                pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
+                pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
+                pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
                 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
…
             if (!(uVirtAddr & (OP_SIZE - 1)))
             {
-                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
                 continue;
             }
…
                 return rcStrict;
 #if OP_SIZE == 32
-            pCtx->rax = uTmpValue;
+            pVCpu->cpum.GstCtx.rax = uTmpValue;
 #else
-            pCtx->OP_rAX = uTmpValue;
+            pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
 #endif
-            pCtx->ADDR_rSI = uAddrReg += cbIncr;
-            pCtx->ADDR_rCX = --uCounterReg;
+            pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
             cLeftPage--;
             IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
…
         if (uCounterReg == 0)
             break;
-        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
     }
 
…
 {
     PVM             pVM = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX        pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC    rcStrict;
…
     if (!fIoChecked)
     {
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx->dx, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
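The #if OP_SIZE == 32 special case in the LODS hunks above exists because writing a 32-bit general-purpose register in x86-64 long mode zero-extends into the whole 64-bit register, so the emulation assigns rax rather than only the low dword. A tiny illustration of the hardware behaviour being modelled (function name is ours):

    #include <stdint.h>

    /* Models "mov eax, value" in 64-bit mode: the upper 32 bits are cleared. */
    static uint64_t write_eax(uint64_t uOldRax, uint32_t uValue)
    {
        (void)uOldRax;              /* previous upper half is discarded */
        return (uint64_t)uValue;    /* zero-extended result */
    }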
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     {
-        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, false /* fRep */,
                                            true /* fStrIo */, cbInstr);
         if (rcStrict == VINF_SVM_VMEXIT)
…
         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
         {
-            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
+            Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
                  VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
…
 
     OP_TYPE *puMem;
-    rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
+    rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
 
     uint32_t u32Value = 0;
-    rcStrict = IOMIOPortRead(pVM, pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
+    rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
     if (IOM_SUCCESS(rcStrict))
     {
…
         if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
         {
-            if (!pCtx->eflags.Bits.u1DF)
-                pCtx->ADDR_rDI += OP_SIZE / 8;
+            if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
+                pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
             else
-                pCtx->ADDR_rDI -= OP_SIZE / 8;
+                pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
         }
…
 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
 {
-    PVM         pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX    pCtx = IEM_GET_CTX(pVCpu);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
…
      * Setup.
      */
-    uint16_t const  u16Port = pCtx->dx;
+    uint16_t const  u16Port = pVCpu->cpum.GstCtx.dx;
     VBOXSTRICTRC    rcStrict;
     if (!fIoChecked)
…
 #endif
 
-    ADDR_TYPE       uCounterReg = pCtx->ADDR_rCX;
+    ADDR_TYPE       uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
     if (uCounterReg == 0)
     {
…
 
     uint64_t        uBaseAddr;
-    rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
+    rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
 
-    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
-    ADDR_TYPE       uAddrReg = pCtx->ADDR_rDI;
+    int8_t const    cbIncr   = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
+    ADDR_TYPE       uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
 
     /*
…
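In the non-REP INS body above, the destination at ES:rDI is mapped for writing before the port is read; reading that ordering from the code, a guest page fault is raised before the device ever sees the read, which matters for devices with read side effects. A condensed, self-contained sketch of the control flow (the helper functions here are stubs of our own, not the VirtualBox API):

    #include <stdint.h>

    /* Stub stand-ins for the mapping and port-read primitives. */
    static int map_guest_write(void **ppv, uint8_t *pbBacking) { *ppv = pbBacking; return 0; }
    static int port_read(uint16_t uPort, uint32_t *pu32) { (void)uPort; *pu32 = 0xAB; return 0; }

    /* Order matters: mapping (which may fault) precedes the device access. */
    static int ins_byte(uint16_t uPort, uint8_t *pbBacking)
    {
        void *pvDst;
        int rc = map_guest_write(&pvDst, pbBacking);   /* a guest #PF would stop here */
        if (rc != 0)
            return rc;
        uint32_t u32;
        rc = port_read(uPort, &u32);                   /* device side effect afterwards */
        if (rc == 0)
            *(uint8_t *)pvDst = (uint8_t)u32;          /* commit (byte case) */
        return rc;
    }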
             && cbIncr > 0 /** @todo Implement reverse direction string ops. */
             && (   IS_64_BIT_CODE(pVCpu)
-                || (   uAddrReg < pCtx->es.u32Limit
-                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
+                || (   uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
+                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
                )
            )
…
                 uint32_t cActualTransfers = cLeftPage - cTransfers;
                 Assert(cActualTransfers <= cLeftPage);
-                pCtx->ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
-                pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
+                pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
+                pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
                 puMem += cActualTransfers;
…
             if (!(uVirtAddr & (OP_SIZE - 1)))
             {
-                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
                 continue;
             }
…
                               RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
 
-            pCtx->ADDR_rDI = uAddrReg += cbIncr;
-            pCtx->ADDR_rCX = --uCounterReg;
+            pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
+            pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
 
             cLeftPage--;
…
         if (uCounterReg == 0)
             break;
-        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
     }
 
…
 {
     PVM             pVM = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX        pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC    rcStrict;
…
     if (!fIoChecked)
     {
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx->dx, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
     {
-        rcStrict = iemSvmHandleIOIntercept(pVCpu, pCtx->dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
+        rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, false /* fRep */,
                                            true /* fStrIo */, cbInstr);
         if (rcStrict == VINF_SVM_VMEXIT)
…
         if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE)
         {
-            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pCtx->dx, OP_SIZE / 8,
+            Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx, OP_SIZE / 8,
                  VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
…
 
     OP_TYPE uValue;
-    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
+    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
     if (rcStrict == VINF_SUCCESS)
     {
-        rcStrict = IOMIOPortWrite(pVM, pVCpu, pCtx->dx, uValue, OP_SIZE / 8);
+        rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
         if (IOM_SUCCESS(rcStrict))
         {
-            if (!pCtx->eflags.Bits.u1DF)
-                pCtx->ADDR_rSI += OP_SIZE / 8;
+            if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
+                pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
             else
-                pCtx->ADDR_rSI -= OP_SIZE / 8;
+                pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
             if (rcStrict != VINF_SUCCESS)
…
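The cActualTransfers arithmetic in the REP INS hunk above assumes the string-I/O backend is handed cLeftPage transfers and returns with the count still outstanding in cTransfers; the guest registers then advance only by the completed difference, which keeps the state consistent across partial transfers. A small sketch of that bookkeeping (names ours):

    #include <assert.h>
    #include <stdint.h>

    static void account_transfers(uint32_t cLeftPage, uint32_t cTransfersLeft, int8_t cbIncr,
                                  uint64_t *puAddrReg, uint64_t *puCounterReg)
    {
        uint32_t cDone = cLeftPage - cTransfersLeft;   /* completed this round */
        assert(cDone <= cLeftPage);
        *puAddrReg    += (int64_t)cbIncr * cDone;      /* rSI/rDI moves by bytes */
        *puCounterReg -= cDone;                        /* rCX counts elements */
    }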
 IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
 {
-    PVM         pVM  = pVCpu->CTX_SUFF(pVM);
-    PCPUMCTX    pCtx = IEM_GET_CTX(pVCpu);
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*
      * Setup.
      */
-    uint16_t const  u16Port = pCtx->dx;
+    uint16_t const  u16Port = pVCpu->cpum.GstCtx.dx;
     VBOXSTRICTRC    rcStrict;
     if (!fIoChecked)
…
 #endif
 
-    ADDR_TYPE       uCounterReg = pCtx->ADDR_rCX;
+    ADDR_TYPE       uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
     if (uCounterReg == 0)
     {
…
         return rcStrict;
 
-    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
-    ADDR_TYPE       uAddrReg = pCtx->ADDR_rSI;
+    int8_t const    cbIncr   = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
+    ADDR_TYPE       uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
 
     /*
…
                 uint32_t cActualTransfers = cLeftPage - cTransfers;
                 Assert(cActualTransfers <= cLeftPage);
-                pCtx->ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
-                pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
+                pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
+                pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
                 puMem += cActualTransfers;
…
             if (!(uVirtAddr & (OP_SIZE - 1)))
             {
-                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+                IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
                 continue;
             }
…
             if (IOM_SUCCESS(rcStrict))
             {
-                pCtx->ADDR_rSI = uAddrReg += cbIncr;
-                pCtx->ADDR_rCX = --uCounterReg;
+                pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
+                pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
                 cLeftPage--;
             }
…
         if (uCounterReg == 0)
             break;
-        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
+        IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
     }
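All of the functions in this diff are stamped out from one body per instruction: the template is compiled repeatedly with different OP_SIZE/ADDR_SIZE definitions, and RT_CONCAT4 pastes those values into names such as iemCImpl_rep_outs_op16_addr32. A minimal reproduction of the token-pasting trick (macro names here are ours; the real helpers live in the IPRT headers):

    /* Two-level expansion so OP_SIZE/ADDR_SIZE expand before pasting. */
    #define MY_CONCAT4_HLP(a,b,c,d)  a##b##c##d
    #define MY_CONCAT4(a,b,c,d)      MY_CONCAT4_HLP(a,b,c,d)

    #define OP_SIZE   16
    #define ADDR_SIZE 32

    /* Declares: void iemCImpl_rep_outs_op16_addr32(void); */
    void MY_CONCAT4(iemCImpl_rep_outs_op, OP_SIZE, _addr, ADDR_SIZE)(void);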