Changeset 62015 in vbox for trunk/src/VBox/VMM
- Timestamp: Jul 4, 2016 7:58:28 PM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
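The hunks below apply one mechanical conversion throughout IEMAll.cpp: opcode handlers and internal helpers that previously took the IEM-private state as a PIEMCPU pIemCpu parameter now take the cross context virtual CPU structure, PVMCPU pVCpu. The IEM state is reached through the pVCpu->iem.s sub-structure, and the VM is reached via pVCpu->CTX_SUFF(pVM) instead of the IEMCPU_TO_VM()/IEMCPU_TO_VMCPU() conversion macros. A minimal sketch of the pattern, using a hypothetical helper name purely for illustration:

/* Before r62015: helpers received the IEM-private state directly. */
IEM_STATIC VBOXSTRICTRC iemExampleHelperOld(PIEMCPU pIemCpu)   /* hypothetical helper */
{
    pIemCpu->rcPassUp = VINF_SUCCESS;       /* IEM fields accessed directly */
    PVM pVM = IEMCPU_TO_VM(pIemCpu);        /* VM reached through a conversion macro */
    NOREF(pVM);
    return VINF_SUCCESS;
}

/* After r62015: helpers receive the cross context virtual CPU structure. */
IEM_STATIC VBOXSTRICTRC iemExampleHelperNew(PVMCPU pVCpu)      /* hypothetical helper */
{
    pVCpu->iem.s.rcPassUp = VINF_SUCCESS;   /* IEM state now lives in pVCpu->iem.s */
    PVM pVM = pVCpu->CTX_SUFF(pVM);         /* VM reached directly from the VMCPU */
    NOREF(pVM);
    return VINF_SUCCESS;
}

The same substitution shows up in the macros (for example, IEM_IS_GUEST_CPU_INTEL now reads (a_pVCpu)->iem.s.enmCpuVendor) and in the FNIEMOP_CALL wrappers, which now pass pVCpu instead of pIemCpu.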
r62010 r62015 150 150 151 151 #if defined(__GNUC__) && defined(RT_ARCH_X86) 152 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(P IEMCPU pIemCpu);153 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(P IEMCPU pIemCpu, uint8_t bRm);152 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu); 153 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm); 154 154 # define FNIEMOP_DEF(a_Name) \ 155 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(P IEMCPU pIemCpu)155 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu) 156 156 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 157 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0)157 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0) 158 158 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ 159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) 160 160 161 161 #elif defined(_MSC_VER) && defined(RT_ARCH_X86) 162 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(P IEMCPU pIemCpu);163 typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(P IEMCPU pIemCpu, uint8_t bRm);162 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu); 163 typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm); 164 164 # define FNIEMOP_DEF(a_Name) \ 165 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(P IEMCPU pIemCpu) RT_NO_THROW_DEF165 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF 166 166 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 167 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF167 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF 168 168 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ 169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF 170 170 171 171 #elif defined(__GNUC__) 172 typedef VBOXSTRICTRC (* PFNIEMOP)(P IEMCPU pIemCpu);173 typedef VBOXSTRICTRC (* PFNIEMOPRM)(P IEMCPU pIemCpu, uint8_t bRm);172 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu); 173 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm); 174 174 # define FNIEMOP_DEF(a_Name) \ 175 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(P IEMCPU pIemCpu)175 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu) 176 176 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 177 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0)177 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0) 178 178 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ 179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) 
180 180 181 181 #else 182 typedef VBOXSTRICTRC (* PFNIEMOP)(P IEMCPU pIemCpu);183 typedef VBOXSTRICTRC (* PFNIEMOPRM)(P IEMCPU pIemCpu, uint8_t bRm);182 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu); 183 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm); 184 184 # define FNIEMOP_DEF(a_Name) \ 185 IEM_STATIC VBOXSTRICTRC a_Name(P IEMCPU pIemCpu) RT_NO_THROW_DEF185 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF 186 186 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 187 IEM_STATIC VBOXSTRICTRC a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF187 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF 188 188 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ 189 IEM_STATIC VBOXSTRICTRC a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF 190 190 191 191 #endif … … 268 268 * done as we please. See FNIEMOP_DEF. 269 269 */ 270 #define FNIEMOP_CALL(a_pfn) (a_pfn)(p IemCpu)270 #define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu) 271 271 272 272 /** … … 276 276 * done as we please. See FNIEMOP_DEF_1. 277 277 */ 278 #define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(p IemCpu, a0)278 #define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0) 279 279 280 280 /** … … 284 284 * done as we please. See FNIEMOP_DEF_1. 285 285 */ 286 #define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(p IemCpu, a0, a1)286 #define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1) 287 287 288 288 /** … … 290 290 * 291 291 * @returns @c true if it is, @c false if not. 292 * @param a_p IemCpuThe IEM state of the current CPU.293 */ 294 #define IEM_IS_REAL_OR_V86_MODE(a_p IemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))292 * @param a_pVCpu The IEM state of the current CPU. 293 */ 294 #define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pVCpu)->iem.s.CTX_SUFF(pCtx))) 295 295 296 296 /** … … 298 298 * 299 299 * @returns @c true if it is, @c false if not. 300 * @param a_p IemCpuThe IEM state of the current CPU.301 */ 302 #define IEM_IS_V86_MODE(a_p IemCpu) (CPUMIsGuestInV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))300 * @param a_pVCpu The IEM state of the current CPU. 301 */ 302 #define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx((a_pVCpu)->iem.s.CTX_SUFF(pCtx))) 303 303 304 304 /** … … 306 306 * 307 307 * @returns @c true if it is, @c false if not. 308 * @param a_p IemCpuThe IEM state of the current CPU.309 */ 310 #define IEM_IS_LONG_MODE(a_p IemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))308 * @param a_pVCpu The IEM state of the current CPU. 309 */ 310 #define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx((a_pVCpu)->iem.s.CTX_SUFF(pCtx))) 311 311 312 312 /** … … 314 314 * 315 315 * @returns @c true if it is, @c false if not. 316 * @param a_p IemCpuThe IEM state of the current CPU.317 */ 318 #define IEM_IS_REAL_MODE(a_p IemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))316 * @param a_pVCpu The IEM state of the current CPU. 317 */ 318 #define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx((a_pVCpu)->iem.s.CTX_SUFF(pCtx))) 319 319 320 320 /** 321 321 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU. 322 322 * @returns PCCPUMFEATURES 323 * @param a_p IemCpuThe IEM state of the current CPU.324 */ 325 #define IEM_GET_GUEST_CPU_FEATURES(a_p IemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.GuestFeatures))323 * @param a_pVCpu The IEM state of the current CPU. 
324 */ 325 #define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures)) 326 326 327 327 /** 328 328 * Returns a (const) pointer to the CPUMFEATURES for the host CPU. 329 329 * @returns PCCPUMFEATURES 330 * @param a_p IemCpuThe IEM state of the current CPU.331 */ 332 #define IEM_GET_HOST_CPU_FEATURES(a_p IemCpu) (&(IEMCPU_TO_VM(a_pIemCpu)->cpum.ro.HostFeatures))330 * @param a_pVCpu The IEM state of the current CPU. 331 */ 332 #define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures)) 333 333 334 334 /** 335 335 * Evaluates to true if we're presenting an Intel CPU to the guest. 336 336 */ 337 #define IEM_IS_GUEST_CPU_INTEL(a_p IemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )337 #define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL ) 338 338 339 339 /** 340 340 * Evaluates to true if we're presenting an AMD CPU to the guest. 341 341 */ 342 #define IEM_IS_GUEST_CPU_AMD(a_p IemCpu) ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )342 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD ) 343 343 344 344 /** … … 708 708 * Internal Functions * 709 709 *********************************************************************************************************************************/ 710 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(P IEMCPU pIemCpu, uint16_t uErr);711 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(P IEMCPU pIemCpu);712 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(P IEMCPU pIemCpu);713 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(P IEMCPU pIemCpu, uint16_t uSel);714 /*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/715 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(P IEMCPU pIemCpu, uint16_t uSel);716 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(P IEMCPU pIemCpu, uint16_t uErr);717 IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(P IEMCPU pIemCpu, uint16_t uSel);718 IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(P IEMCPU pIemCpu, uint16_t uErr);719 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(P IEMCPU pIemCpu, uint16_t uErr);720 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(P IEMCPU pIemCpu);721 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(P IEMCPU pIemCpu, RTSEL uSel);722 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);723 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(P IEMCPU pIemCpu, RTSEL Sel);724 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);725 IEM_STATIC VBOXSTRICTRC iemRaisePageFault(P IEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);726 IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(P IEMCPU pIemCpu);727 IEM_STATIC VBOXSTRICTRC iemMemMap(P IEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);728 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(P IEMCPU pIemCpu, void *pvMem, uint32_t fAccess);729 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);730 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);731 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR 
GCPtrMem);732 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);733 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);734 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);735 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(P IEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);736 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(P IEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);737 IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(P IEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);738 IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(P IEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);739 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(P IEMCPU pIemCpu, uint32_t u32Value);740 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(P IEMCPU pIemCpu, uint16_t u16Value);741 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(P IEMCPU pIemCpu, uint16_t uSel);742 IEM_STATIC uint16_t iemSRegFetchU16(P IEMCPU pIemCpu, uint8_t iSegReg);710 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr); 711 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu); 712 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu); 713 IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel); 714 /*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/ 715 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel); 716 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr); 717 IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel); 718 IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr); 719 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr); 720 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu); 721 IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel); 722 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess); 723 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel); 724 IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess); 725 IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc); 726 IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu); 727 IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess); 728 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess); 729 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 730 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 731 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 732 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 733 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 734 
IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem); 735 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode); 736 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt); 737 IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp); 738 IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp); 739 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value); 740 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value); 741 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel); 742 IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg); 743 743 744 744 #if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL) 745 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(P IEMCPU pIemCpu);746 #endif 747 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);748 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);745 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu); 746 #endif 747 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue); 748 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue); 749 749 750 750 … … 754 754 * 755 755 * @returns VINF_SUCCESS. 756 * @param pIemCpu The per CPU IEM state of the calling thread. 756 * @param pVCpu The cross context virtual CPU structure of the 757 * calling thread. 757 758 * @param rcPassUp The pass up status. Must be informational. 758 759 * VINF_SUCCESS is not allowed. 759 760 */ 760 IEM_STATIC int iemSetPassUpStatus(P IEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)761 IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp) 761 762 { 762 763 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS); 763 764 764 int32_t const rcOldPassUp = p IemCpu->rcPassUp;765 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp; 765 766 if (rcOldPassUp == VINF_SUCCESS) 766 p IemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);767 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp); 767 768 /* If both are EM scheduling codes, use EM priority rules. */ 768 769 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST … … 772 773 { 773 774 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp)); 774 p IemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);775 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp); 775 776 } 776 777 else … … 781 782 { 782 783 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp)); 783 p IemCpu->rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);784 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp); 784 785 } 785 786 /* Don't override specific status code, first come first served. */ … … 811 812 * Initializes the execution state. 812 813 * 813 * @param pIemCpu The per CPU IEM state. 814 * @param pVCpu The cross context virtual CPU structure of the 815 * calling thread. 814 816 * @param fBypassHandlers Whether to bypass access handlers. 815 817 * … … 817 819 * side-effects in strict builds. 
818 820 */ 819 DECLINLINE(void) iemInitExec(PIEMCPU pIemCpu, bool fBypassHandlers) 820 { 821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 822 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 821 DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers) 822 { 823 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 823 824 824 825 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); … … 838 839 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu); 839 840 #endif 840 p IemCpu->uCpl = CPUMGetGuestCPL(pVCpu);841 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);841 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); 842 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 842 843 #ifdef VBOX_STRICT 843 p IemCpu->enmDefAddrMode = (IEMMODE)0xc0fe;844 p IemCpu->enmEffAddrMode = (IEMMODE)0xc0fe;845 p IemCpu->enmDefOpSize = (IEMMODE)0xc0fe;846 p IemCpu->enmEffOpSize = (IEMMODE)0xc0fe;847 p IemCpu->fPrefixes = (IEMMODE)0xfeedbeef;848 p IemCpu->uRexReg = 127;849 p IemCpu->uRexB = 127;850 p IemCpu->uRexIndex = 127;851 p IemCpu->iEffSeg = 127;852 p IemCpu->offOpcode = 127;853 p IemCpu->cbOpcode = 127;854 #endif 855 856 p IemCpu->cActiveMappings = 0;857 p IemCpu->iNextMapping = 0;858 p IemCpu->rcPassUp = VINF_SUCCESS;859 p IemCpu->fBypassHandlers = fBypassHandlers;844 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe; 845 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe; 846 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe; 847 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe; 848 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef; 849 pVCpu->iem.s.uRexReg = 127; 850 pVCpu->iem.s.uRexB = 127; 851 pVCpu->iem.s.uRexIndex = 127; 852 pVCpu->iem.s.iEffSeg = 127; 853 pVCpu->iem.s.offOpcode = 127; 854 pVCpu->iem.s.cbOpcode = 127; 855 #endif 856 857 pVCpu->iem.s.cActiveMappings = 0; 858 pVCpu->iem.s.iNextMapping = 0; 859 pVCpu->iem.s.rcPassUp = VINF_SUCCESS; 860 pVCpu->iem.s.fBypassHandlers = fBypassHandlers; 860 861 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 861 p IemCpu->fInPatchCode = pIemCpu->uCpl == 0862 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0 862 863 && pCtx->cs.u64Base == 0 863 864 && pCtx->cs.u32Limit == UINT32_MAX 864 && PATMIsPatchGCAddr( IEMCPU_TO_VM(pIemCpu), pCtx->eip);865 if (!p IemCpu->fInPatchCode)865 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip); 866 if (!pVCpu->iem.s.fInPatchCode) 866 867 CPUMRawLeave(pVCpu, VINF_SUCCESS); 867 868 #endif 868 869 869 870 #ifdef IEM_VERIFICATION_MODE_FULL 870 p IemCpu->fNoRemSavedByExec = pIemCpu->fNoRem;871 p IemCpu->fNoRem = true;871 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem; 872 pVCpu->iem.s.fNoRem = true; 872 873 #endif 873 874 } … … 877 878 * Counterpart to #iemInitExec that undoes evil strict-build stuff. 878 879 * 879 * @param pIemCpu The per CPU IEM state. 880 */ 881 DECLINLINE(void) iemUninitExec(PIEMCPU pIemCpu) 880 * @param pVCpu The cross context virtual CPU structure of the 881 * calling thread. 882 */ 883 DECLINLINE(void) iemUninitExec(PVMCPU pVCpu) 882 884 { 883 885 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */ 884 886 #ifdef IEM_VERIFICATION_MODE_FULL 885 p IemCpu->fNoRem = pIemCpu->fNoRemSavedByExec;887 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec; 886 888 #endif 887 889 #ifdef VBOX_STRICT 888 p IemCpu->cbOpcode = 0;890 pVCpu->iem.s.cbOpcode = 0; 889 891 #else 890 NOREF(p IemCpu);892 NOREF(pVCpu); 891 893 #endif 892 894 } … … 898 900 * iemReInitDecoder is mostly a copy of this function. 899 901 * 900 * @param pIemCpu The per CPU IEM state. 902 * @param pVCpu The cross context virtual CPU structure of the 903 * calling thread. 
901 904 * @param fBypassHandlers Whether to bypass access handlers. 902 905 */ 903 DECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers) 904 { 905 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 906 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 906 DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers) 907 { 908 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 907 909 908 910 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); … … 922 924 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu); 923 925 #endif 924 p IemCpu->uCpl = CPUMGetGuestCPL(pVCpu);926 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); 925 927 #ifdef IEM_VERIFICATION_MODE_FULL 926 if (p IemCpu->uInjectCpl != UINT8_MAX)927 p IemCpu->uCpl = pIemCpu->uInjectCpl;928 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX) 929 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl; 928 930 #endif 929 931 IEMMODE enmMode = iemCalcCpuMode(pCtx); 930 p IemCpu->enmCpuMode = enmMode;931 p IemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */932 p IemCpu->enmEffAddrMode = enmMode;932 pVCpu->iem.s.enmCpuMode = enmMode; 933 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */ 934 pVCpu->iem.s.enmEffAddrMode = enmMode; 933 935 if (enmMode != IEMMODE_64BIT) 934 936 { 935 p IemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */936 p IemCpu->enmEffOpSize = enmMode;937 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */ 938 pVCpu->iem.s.enmEffOpSize = enmMode; 937 939 } 938 940 else 939 941 { 940 p IemCpu->enmDefOpSize = IEMMODE_32BIT;941 p IemCpu->enmEffOpSize = IEMMODE_32BIT;942 } 943 p IemCpu->fPrefixes = 0;944 p IemCpu->uRexReg = 0;945 p IemCpu->uRexB = 0;946 p IemCpu->uRexIndex = 0;947 p IemCpu->iEffSeg = X86_SREG_DS;948 p IemCpu->offOpcode = 0;949 p IemCpu->cbOpcode = 0;950 p IemCpu->cActiveMappings = 0;951 p IemCpu->iNextMapping = 0;952 p IemCpu->rcPassUp = VINF_SUCCESS;953 p IemCpu->fBypassHandlers = fBypassHandlers;942 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT; 943 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT; 944 } 945 pVCpu->iem.s.fPrefixes = 0; 946 pVCpu->iem.s.uRexReg = 0; 947 pVCpu->iem.s.uRexB = 0; 948 pVCpu->iem.s.uRexIndex = 0; 949 pVCpu->iem.s.iEffSeg = X86_SREG_DS; 950 pVCpu->iem.s.offOpcode = 0; 951 pVCpu->iem.s.cbOpcode = 0; 952 pVCpu->iem.s.cActiveMappings = 0; 953 pVCpu->iem.s.iNextMapping = 0; 954 pVCpu->iem.s.rcPassUp = VINF_SUCCESS; 955 pVCpu->iem.s.fBypassHandlers = fBypassHandlers; 954 956 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 955 p IemCpu->fInPatchCode = pIemCpu->uCpl == 0957 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0 956 958 && pCtx->cs.u64Base == 0 957 959 && pCtx->cs.u32Limit == UINT32_MAX 958 && PATMIsPatchGCAddr( IEMCPU_TO_VM(pIemCpu), pCtx->eip);959 if (!p IemCpu->fInPatchCode)960 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip); 961 if (!pVCpu->iem.s.fInPatchCode) 960 962 CPUMRawLeave(pVCpu, VINF_SUCCESS); 961 963 #endif … … 965 967 { 966 968 case IEMMODE_64BIT: 967 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", p IemCpu->uCpl, pCtx->rip);969 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip); 968 970 break; 969 971 case IEMMODE_32BIT: 970 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", p IemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);972 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip); 971 973 break; 972 974 case IEMMODE_16BIT: 973 
RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", p IemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);975 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip); 974 976 break; 975 977 } … … 984 986 * 985 987 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 986 * @param pIemCpu The per CPU IEM state. 987 */ 988 DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu, PIEMCPU pIemCpu) 989 { 990 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 988 */ 989 DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu) 990 { 991 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 991 992 992 993 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); … … 1003 1004 #endif 1004 1005 1005 p IemCpu->uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */1006 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */ 1006 1007 #ifdef IEM_VERIFICATION_MODE_FULL 1007 if (p IemCpu->uInjectCpl != UINT8_MAX)1008 p IemCpu->uCpl = pIemCpu->uInjectCpl;1008 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX) 1009 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl; 1009 1010 #endif 1010 1011 IEMMODE enmMode = iemCalcCpuMode(pCtx); 1011 p IemCpu->enmCpuMode = enmMode; /** @todo this should be updated during execution! */1012 p IemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */1013 p IemCpu->enmEffAddrMode = enmMode;1012 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */ 1013 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */ 1014 pVCpu->iem.s.enmEffAddrMode = enmMode; 1014 1015 if (enmMode != IEMMODE_64BIT) 1015 1016 { 1016 p IemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */1017 p IemCpu->enmEffOpSize = enmMode;1017 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */ 1018 pVCpu->iem.s.enmEffOpSize = enmMode; 1018 1019 } 1019 1020 else 1020 1021 { 1021 p IemCpu->enmDefOpSize = IEMMODE_32BIT;1022 p IemCpu->enmEffOpSize = IEMMODE_32BIT;1023 } 1024 p IemCpu->fPrefixes = 0;1025 p IemCpu->uRexReg = 0;1026 p IemCpu->uRexB = 0;1027 p IemCpu->uRexIndex = 0;1028 p IemCpu->iEffSeg = X86_SREG_DS;1029 if (p IemCpu->cbOpcode > pIemCpu->offOpcode) /* No need to check RIP here because branch instructions will update cbOpcode. */1030 { 1031 p IemCpu->cbOpcode -= pIemCpu->offOpcode;1032 memmove(&p IemCpu->abOpcode[0], &pIemCpu->abOpcode[pIemCpu->offOpcode], pIemCpu->cbOpcode);1022 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT; 1023 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT; 1024 } 1025 pVCpu->iem.s.fPrefixes = 0; 1026 pVCpu->iem.s.uRexReg = 0; 1027 pVCpu->iem.s.uRexB = 0; 1028 pVCpu->iem.s.uRexIndex = 0; 1029 pVCpu->iem.s.iEffSeg = X86_SREG_DS; 1030 if (pVCpu->iem.s.cbOpcode > pVCpu->iem.s.offOpcode) /* No need to check RIP here because branch instructions will update cbOpcode. 
*/ 1031 { 1032 pVCpu->iem.s.cbOpcode -= pVCpu->iem.s.offOpcode; 1033 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode], pVCpu->iem.s.cbOpcode); 1033 1034 } 1034 1035 else 1035 p IemCpu->cbOpcode = 0;1036 p IemCpu->offOpcode = 0;1037 Assert(p IemCpu->cActiveMappings == 0);1038 p IemCpu->iNextMapping = 0;1039 Assert(p IemCpu->rcPassUp == VINF_SUCCESS);1040 Assert(p IemCpu->fBypassHandlers == false);1036 pVCpu->iem.s.cbOpcode = 0; 1037 pVCpu->iem.s.offOpcode = 0; 1038 Assert(pVCpu->iem.s.cActiveMappings == 0); 1039 pVCpu->iem.s.iNextMapping = 0; 1040 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS); 1041 Assert(pVCpu->iem.s.fBypassHandlers == false); 1041 1042 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 1042 if (!p IemCpu->fInPatchCode)1043 if (!pVCpu->iem.s.fInPatchCode) 1043 1044 { /* likely */ } 1044 1045 else 1045 1046 { 1046 p IemCpu->fInPatchCode = pIemCpu->uCpl == 01047 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0 1047 1048 && pCtx->cs.u64Base == 0 1048 1049 && pCtx->cs.u32Limit == UINT32_MAX 1049 && PATMIsPatchGCAddr( IEMCPU_TO_VM(pIemCpu), pCtx->eip);1050 if (!p IemCpu->fInPatchCode)1050 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip); 1051 if (!pVCpu->iem.s.fInPatchCode) 1051 1052 CPUMRawLeave(pVCpu, VINF_SUCCESS); 1052 1053 } … … 1057 1058 { 1058 1059 case IEMMODE_64BIT: 1059 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", p IemCpu->uCpl, pCtx->rip);1060 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip); 1060 1061 break; 1061 1062 case IEMMODE_32BIT: 1062 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", p IemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);1063 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip); 1063 1064 break; 1064 1065 case IEMMODE_16BIT: 1065 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", p IemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);1066 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip); 1066 1067 break; 1067 1068 } … … 1075 1076 * 1076 1077 * @returns Strict VBox status code. 1077 * @param pIemCpu The IEM state. 1078 * @param pVCpu The cross context virtual CPU structure of the 1079 * calling thread. 1078 1080 * @param fBypassHandlers Whether to bypass access handlers. 1079 1081 */ 1080 IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(P IEMCPU pIemCpu, bool fBypassHandlers)1082 IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers) 1081 1083 { 1082 1084 #ifdef IEM_VERIFICATION_MODE_FULL 1083 uint8_t const cbOldOpcodes = p IemCpu->cbOpcode;1084 #endif 1085 iemInitDecoder(p IemCpu, fBypassHandlers);1085 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode; 1086 #endif 1087 iemInitDecoder(pVCpu, fBypassHandlers); 1086 1088 1087 1089 /* … … 1090 1092 * First translate CS:rIP to a physical address. 
1091 1093 */ 1092 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1094 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1093 1095 uint32_t cbToTryRead; 1094 1096 RTGCPTR GCPtrPC; 1095 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)1097 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1096 1098 { 1097 1099 cbToTryRead = PAGE_SIZE; 1098 1100 GCPtrPC = pCtx->rip; 1099 1101 if (!IEM_IS_CANONICAL(GCPtrPC)) 1100 return iemRaiseGeneralProtectionFault0(p IemCpu);1102 return iemRaiseGeneralProtectionFault0(pVCpu); 1101 1103 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); 1102 1104 } … … 1104 1106 { 1105 1107 uint32_t GCPtrPC32 = pCtx->eip; 1106 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || p IemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));1108 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip)); 1107 1109 if (GCPtrPC32 > pCtx->cs.u32Limit) 1108 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1110 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1109 1111 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1; 1110 1112 if (!cbToTryRead) /* overflowed */ … … 1120 1122 /* Allow interpretation of patch manager code blocks since they can for 1121 1123 instance throw #PFs for perfectly good reasons. */ 1122 if (p IemCpu->fInPatchCode)1124 if (pVCpu->iem.s.fInPatchCode) 1123 1125 { 1124 1126 size_t cbRead = 0; 1125 int rc = PATMReadPatchCode( IEMCPU_TO_VM(pIemCpu), GCPtrPC, pIemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbRead);1127 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead); 1126 1128 AssertRCReturn(rc, rc); 1127 p IemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);1129 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0); 1128 1130 return VINF_SUCCESS; 1129 1131 } … … 1132 1134 RTGCPHYS GCPhys; 1133 1135 uint64_t fFlags; 1134 int rc = PGMGstGetPage( IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);1136 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys); 1135 1137 if (RT_FAILURE(rc)) 1136 1138 { 1137 1139 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc)); 1138 return iemRaisePageFault(p IemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);1139 } 1140 if (!(fFlags & X86_PTE_US) && p IemCpu->uCpl == 3)1140 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc); 1141 } 1142 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3) 1141 1143 { 1142 1144 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC)); 1143 return iemRaisePageFault(p IemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1145 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1144 1146 } 1145 1147 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE)) 1146 1148 { 1147 1149 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC)); 1148 return iemRaisePageFault(p IemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1150 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1149 1151 } 1150 1152 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK; … … 1159 1161 */ 1160 1162 /** @todo optimize this differently by not using PGMPhysRead. 
*/ 1161 RTGCPHYS const offPrevOpcodes = GCPhys - p IemCpu->GCPhysOpcodes;1162 p IemCpu->GCPhysOpcodes = GCPhys;1163 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes; 1164 pVCpu->iem.s.GCPhysOpcodes = GCPhys; 1163 1165 if ( offPrevOpcodes < cbOldOpcodes 1164 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(p IemCpu->abOpcode))1166 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode)) 1165 1167 { 1166 1168 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes; 1167 Assert(cbNew <= RT_ELEMENTS(p IemCpu->abOpcode));1168 memmove(&p IemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);1169 p IemCpu->cbOpcode = cbNew;1169 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode)); 1170 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew); 1171 pVCpu->iem.s.cbOpcode = cbNew; 1170 1172 return VINF_SUCCESS; 1171 1173 } … … 1175 1177 * Read the bytes at this address. 1176 1178 */ 1177 PVM pVM = IEMCPU_TO_VM(pIemCpu);1179 PVM pVM = pVCpu->CTX_SUFF(pVM); 1178 1180 #if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0) 1179 1181 size_t cbActual; 1180 1182 if ( PATMIsEnabled(pVM) 1181 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, p IemCpu->abOpcode, sizeof(pIemCpu->abOpcode), &cbActual)))1183 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual))) 1182 1184 { 1183 1185 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC)); 1184 1186 Assert(cbActual > 0); 1185 p IemCpu->cbOpcode = (uint8_t)cbActual;1187 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual; 1186 1188 } 1187 1189 else … … 1191 1193 if (cbToTryRead > cbLeftOnPage) 1192 1194 cbToTryRead = cbLeftOnPage; 1193 if (cbToTryRead > sizeof(p IemCpu->abOpcode))1194 cbToTryRead = sizeof(p IemCpu->abOpcode);1195 1196 if (!p IemCpu->fBypassHandlers)1195 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode)) 1196 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode); 1197 1198 if (!pVCpu->iem.s.fBypassHandlers) 1197 1199 { 1198 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, p IemCpu->abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);1200 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM); 1199 1201 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 1200 1202 { /* likely */ } … … 1203 1205 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n", 1204 1206 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead)); 1205 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1207 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1206 1208 } 1207 1209 else … … 1216 1218 else 1217 1219 { 1218 rc = PGMPhysSimpleReadGCPhys(pVM, p IemCpu->abOpcode, GCPhys, cbToTryRead);1220 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead); 1219 1221 if (RT_SUCCESS(rc)) 1220 1222 { /* likely */ } … … 1226 1228 } 1227 1229 } 1228 p IemCpu->cbOpcode = cbToTryRead;1230 pVCpu->iem.s.cbOpcode = cbToTryRead; 1229 1231 } 1230 1232 … … 1238 1240 * 1239 1241 * @returns Strict VBox status code. 1240 * @param pIemCpu The IEM state. 1242 * @param pVCpu The cross context virtual CPU structure of the 1243 * calling thread. 1241 1244 * @param cbMin The minimum number of bytes relative offOpcode 1242 1245 * that must be read. 
1243 1246 */ 1244 IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(P IEMCPU pIemCpu, size_t cbMin)1247 IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin) 1245 1248 { 1246 1249 /* … … 1249 1252 * First translate CS:rIP to a physical address. 1250 1253 */ 1251 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1252 uint8_t cbLeft = p IemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);1254 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1255 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin); 1253 1256 uint32_t cbToTryRead; 1254 1257 RTGCPTR GCPtrNext; 1255 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)1258 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1256 1259 { 1257 1260 cbToTryRead = PAGE_SIZE; 1258 GCPtrNext = pCtx->rip + p IemCpu->cbOpcode;1261 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode; 1259 1262 if (!IEM_IS_CANONICAL(GCPtrNext)) 1260 return iemRaiseGeneralProtectionFault0(p IemCpu);1263 return iemRaiseGeneralProtectionFault0(pVCpu); 1261 1264 } 1262 1265 else 1263 1266 { 1264 1267 uint32_t GCPtrNext32 = pCtx->eip; 1265 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || p IemCpu->enmCpuMode == IEMMODE_32BIT);1266 GCPtrNext32 += p IemCpu->cbOpcode;1268 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); 1269 GCPtrNext32 += pVCpu->iem.s.cbOpcode; 1267 1270 if (GCPtrNext32 > pCtx->cs.u32Limit) 1268 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1271 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1269 1272 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1; 1270 1273 if (!cbToTryRead) /* overflowed */ … … 1275 1278 } 1276 1279 if (cbToTryRead < cbMin - cbLeft) 1277 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1280 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 1278 1281 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32; 1279 1282 } … … 1284 1287 if (cbToTryRead > cbLeftOnPage) 1285 1288 cbToTryRead = cbLeftOnPage; 1286 if (cbToTryRead > sizeof(p IemCpu->abOpcode) - pIemCpu->cbOpcode)1287 cbToTryRead = sizeof(p IemCpu->abOpcode) - pIemCpu->cbOpcode;1289 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode) 1290 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode; 1288 1291 /** @todo r=bird: Convert assertion into undefined opcode exception? */ 1289 1292 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */ … … 1292 1295 /* Allow interpretation of patch manager code blocks since they can for 1293 1296 instance throw #PFs for perfectly good reasons. 
*/ 1294 if (p IemCpu->fInPatchCode)1297 if (pVCpu->iem.s.fInPatchCode) 1295 1298 { 1296 1299 size_t cbRead = 0; 1297 int rc = PATMReadPatchCode( IEMCPU_TO_VM(pIemCpu), GCPtrNext, pIemCpu->abOpcode, cbToTryRead, &cbRead);1300 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead); 1298 1301 AssertRCReturn(rc, rc); 1299 p IemCpu->cbOpcode = (uint8_t)cbRead; Assert(pIemCpu->cbOpcode == cbRead); Assert(cbRead > 0);1302 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0); 1300 1303 return VINF_SUCCESS; 1301 1304 } … … 1304 1307 RTGCPHYS GCPhys; 1305 1308 uint64_t fFlags; 1306 int rc = PGMGstGetPage( IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);1309 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys); 1307 1310 if (RT_FAILURE(rc)) 1308 1311 { 1309 1312 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc)); 1310 return iemRaisePageFault(p IemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);1311 } 1312 if (!(fFlags & X86_PTE_US) && p IemCpu->uCpl == 3)1313 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc); 1314 } 1315 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3) 1313 1316 { 1314 1317 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext)); 1315 return iemRaisePageFault(p IemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1318 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1316 1319 } 1317 1320 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE)) 1318 1321 { 1319 1322 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext)); 1320 return iemRaisePageFault(p IemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1323 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED); 1321 1324 } 1322 1325 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK; 1323 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, p IemCpu->cbOpcode));1326 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode)); 1324 1327 /** @todo Check reserved bits and such stuff. PGM is better at doing 1325 1328 * that, so do it when implementing the guest virtual address … … 1333 1336 * should be no need to check again here. 
1334 1337 */ 1335 if (!p IemCpu->fBypassHandlers)1336 { 1337 VBOXSTRICTRC rcStrict = PGMPhysRead( IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode],1338 if (!pVCpu->iem.s.fBypassHandlers) 1339 { 1340 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], 1338 1341 cbToTryRead, PGMACCESSORIGIN_IEM); 1339 1342 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) … … 1343 1346 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n", 1344 1347 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead)); 1345 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1348 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1346 1349 } 1347 1350 else … … 1356 1359 else 1357 1360 { 1358 rc = PGMPhysSimpleReadGCPhys( IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);1361 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead); 1359 1362 if (RT_SUCCESS(rc)) 1360 1363 { /* likely */ } … … 1365 1368 } 1366 1369 } 1367 p IemCpu->cbOpcode += cbToTryRead;1368 Log5(("%.*Rhxs\n", p IemCpu->cbOpcode, pIemCpu->abOpcode));1370 pVCpu->iem.s.cbOpcode += cbToTryRead; 1371 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode)); 1369 1372 1370 1373 return VINF_SUCCESS; … … 1377 1380 * 1378 1381 * @returns Strict VBox status code. 1379 * @param pIemCpu The IEM state. 1382 * @param pVCpu The cross context virtual CPU structure of the 1383 * calling thread. 1380 1384 * @param pb Where to return the opcode byte. 1381 1385 */ 1382 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(P IEMCPU pIemCpu, uint8_t *pb)1383 { 1384 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 1);1386 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb) 1387 { 1388 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1); 1385 1389 if (rcStrict == VINF_SUCCESS) 1386 1390 { 1387 uint8_t offOpcode = p IemCpu->offOpcode;1388 *pb = p IemCpu->abOpcode[offOpcode];1389 p IemCpu->offOpcode = offOpcode + 1;1391 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1392 *pb = pVCpu->iem.s.abOpcode[offOpcode]; 1393 pVCpu->iem.s.offOpcode = offOpcode + 1; 1390 1394 } 1391 1395 else … … 1399 1403 * 1400 1404 * @returns Strict VBox status code. 1401 * @param pIemCpu The IEM state. 1405 * @param pVCpu The cross context virtual CPU structure of the 1406 * calling thread. 1402 1407 * @param pu8 Where to return the opcode byte. 1403 1408 */ 1404 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(P IEMCPU pIemCpu, uint8_t *pu8)1405 { 1406 uintptr_t const offOpcode = p IemCpu->offOpcode;1407 if (RT_LIKELY((uint8_t)offOpcode < p IemCpu->cbOpcode))1408 { 1409 p IemCpu->offOpcode = (uint8_t)offOpcode + 1;1410 *pu8 = p IemCpu->abOpcode[offOpcode];1409 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8) 1410 { 1411 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 1412 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) 1413 { 1414 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1; 1415 *pu8 = pVCpu->iem.s.abOpcode[offOpcode]; 1411 1416 return VINF_SUCCESS; 1412 1417 } 1413 return iemOpcodeGetNextU8Slow(p IemCpu, pu8);1418 return iemOpcodeGetNextU8Slow(pVCpu, pu8); 1414 1419 } 1415 1420 … … 1420 1425 * 1421 1426 * @returns The opcode byte. 
1422 * @param p IemCpu The IEM state.1423 */ 1424 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(P IEMCPU pIemCpu)1425 { 1426 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 1);1427 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1428 */ 1429 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu) 1430 { 1431 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1); 1427 1432 if (rcStrict == VINF_SUCCESS) 1428 return p IemCpu->abOpcode[pIemCpu->offOpcode++];1429 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));1433 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++]; 1434 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 1430 1435 } 1431 1436 … … 1435 1440 * 1436 1441 * @returns The opcode byte. 1437 * @param p IemCpu The IEM state.1438 */ 1439 DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(P IEMCPU pIemCpu)1440 { 1441 uintptr_t offOpcode = p IemCpu->offOpcode;1442 if (RT_LIKELY((uint8_t)offOpcode < p IemCpu->cbOpcode))1443 { 1444 p IemCpu->offOpcode = (uint8_t)offOpcode + 1;1445 return p IemCpu->abOpcode[offOpcode];1446 } 1447 return iemOpcodeGetNextU8SlowJmp(p IemCpu);1442 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1443 */ 1444 DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu) 1445 { 1446 uintptr_t offOpcode = pVCpu->iem.s.offOpcode; 1447 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) 1448 { 1449 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1; 1450 return pVCpu->iem.s.abOpcode[offOpcode]; 1451 } 1452 return iemOpcodeGetNextU8SlowJmp(pVCpu); 1448 1453 } 1449 1454 … … 1454 1459 * 1455 1460 * @param a_pu8 Where to return the opcode byte. 1456 * @remark Implicitly references p IemCpu.1461 * @remark Implicitly references pVCpu. 1457 1462 */ 1458 1463 #ifndef IEM_WITH_SETJMP … … 1460 1465 do \ 1461 1466 { \ 1462 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(p IemCpu, (a_pu8)); \1467 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \ 1463 1468 if (rcStrict2 == VINF_SUCCESS) \ 1464 1469 { /* likely */ } \ … … 1467 1472 } while (0) 1468 1473 #else 1469 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(p IemCpu))1474 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu)) 1470 1475 #endif /* IEM_WITH_SETJMP */ 1471 1476 … … 1476 1481 * 1477 1482 * @returns Strict VBox status code. 1478 * @param p IemCpu The IEM state.1483 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1479 1484 * @param pi8 Where to return the signed byte. 1480 1485 */ 1481 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(P IEMCPU pIemCpu, int8_t *pi8)1482 { 1483 return iemOpcodeGetNextU8(p IemCpu, (uint8_t *)pi8);1486 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8) 1487 { 1488 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8); 1484 1489 } 1485 1490 #endif /* !IEM_WITH_SETJMP */ … … 1491 1496 * 1492 1497 * @param a_pi8 Where to return the signed byte. 1493 * @remark Implicitly references p IemCpu.1498 * @remark Implicitly references pVCpu. 
1494 1499 */ 1495 1500 #ifndef IEM_WITH_SETJMP … … 1497 1502 do \ 1498 1503 { \ 1499 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(p IemCpu, (a_pi8)); \1504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \ 1500 1505 if (rcStrict2 != VINF_SUCCESS) \ 1501 1506 return rcStrict2; \ 1502 1507 } while (0) 1503 1508 #else /* IEM_WITH_SETJMP */ 1504 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(p IemCpu))1509 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 1505 1510 1506 1511 #endif /* IEM_WITH_SETJMP */ … … 1512 1517 * 1513 1518 * @returns Strict VBox status code. 1514 * @param p IemCpu The IEM state.1519 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1515 1520 * @param pu16 Where to return the opcode dword. 1516 1521 */ 1517 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(P IEMCPU pIemCpu, uint16_t *pu16)1522 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16) 1518 1523 { 1519 1524 uint8_t u8; 1520 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(p IemCpu, &u8);1525 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8); 1521 1526 if (rcStrict == VINF_SUCCESS) 1522 1527 *pu16 = (int8_t)u8; … … 1530 1535 * 1531 1536 * @returns Strict VBox status code. 1532 * @param p IemCpu The IEM state.1537 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1533 1538 * @param pu16 Where to return the unsigned word. 1534 1539 */ 1535 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(P IEMCPU pIemCpu, uint16_t *pu16)1536 { 1537 uint8_t const offOpcode = p IemCpu->offOpcode;1538 if (RT_UNLIKELY(offOpcode >= p IemCpu->cbOpcode))1539 return iemOpcodeGetNextS8SxU16Slow(p IemCpu, pu16);1540 1541 *pu16 = (int8_t)p IemCpu->abOpcode[offOpcode];1542 p IemCpu->offOpcode = offOpcode + 1;1540 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16) 1541 { 1542 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 1543 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 1544 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16); 1545 1546 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 1547 pVCpu->iem.s.offOpcode = offOpcode + 1; 1543 1548 return VINF_SUCCESS; 1544 1549 } … … 1551 1556 * 1552 1557 * @param a_pu16 Where to return the word. 1553 * @remark Implicitly references p IemCpu.1558 * @remark Implicitly references pVCpu. 1554 1559 */ 1555 1560 #ifndef IEM_WITH_SETJMP … … 1557 1562 do \ 1558 1563 { \ 1559 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(p IemCpu, (a_pu16)); \1564 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \ 1560 1565 if (rcStrict2 != VINF_SUCCESS) \ 1561 1566 return rcStrict2; \ 1562 1567 } while (0) 1563 1568 #else 1564 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(p IemCpu))1569 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 1565 1570 #endif 1566 1571 … … 1571 1576 * 1572 1577 * @returns Strict VBox status code. 1573 * @param p IemCpu The IEM state.1578 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1574 1579 * @param pu32 Where to return the opcode dword. 
1575 1580 */ 1576 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(P IEMCPU pIemCpu, uint32_t *pu32)1581 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32) 1577 1582 { 1578 1583 uint8_t u8; 1579 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(p IemCpu, &u8);1584 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8); 1580 1585 if (rcStrict == VINF_SUCCESS) 1581 1586 *pu32 = (int8_t)u8; … … 1589 1594 * 1590 1595 * @returns Strict VBox status code. 1591 * @param p IemCpu The IEM state.1596 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1592 1597 * @param pu32 Where to return the unsigned dword. 1593 1598 */ 1594 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(P IEMCPU pIemCpu, uint32_t *pu32)1595 { 1596 uint8_t const offOpcode = p IemCpu->offOpcode;1597 if (RT_UNLIKELY(offOpcode >= p IemCpu->cbOpcode))1598 return iemOpcodeGetNextS8SxU32Slow(p IemCpu, pu32);1599 1600 *pu32 = (int8_t)p IemCpu->abOpcode[offOpcode];1601 p IemCpu->offOpcode = offOpcode + 1;1599 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32) 1600 { 1601 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 1602 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 1603 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32); 1604 1605 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 1606 pVCpu->iem.s.offOpcode = offOpcode + 1; 1602 1607 return VINF_SUCCESS; 1603 1608 } … … 1610 1615 * 1611 1616 * @param a_pu32 Where to return the word. 1612 * @remark Implicitly references p IemCpu.1617 * @remark Implicitly references pVCpu. 1613 1618 */ 1614 1619 #ifndef IEM_WITH_SETJMP … … 1616 1621 do \ 1617 1622 { \ 1618 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(p IemCpu, (a_pu32)); \1623 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \ 1619 1624 if (rcStrict2 != VINF_SUCCESS) \ 1620 1625 return rcStrict2; \ 1621 1626 } while (0) 1622 1627 #else 1623 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(p IemCpu))1628 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 1624 1629 #endif 1625 1630 … … 1630 1635 * 1631 1636 * @returns Strict VBox status code. 1632 * @param p IemCpu The IEM state.1637 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1633 1638 * @param pu64 Where to return the opcode qword. 1634 1639 */ 1635 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(P IEMCPU pIemCpu, uint64_t *pu64)1640 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64) 1636 1641 { 1637 1642 uint8_t u8; 1638 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(p IemCpu, &u8);1643 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8); 1639 1644 if (rcStrict == VINF_SUCCESS) 1640 1645 *pu64 = (int8_t)u8; … … 1648 1653 * 1649 1654 * @returns Strict VBox status code. 1650 * @param p IemCpu The IEM state.1655 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1651 1656 * @param pu64 Where to return the unsigned qword. 
1652 1657 */ 1653 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(P IEMCPU pIemCpu, uint64_t *pu64)1654 { 1655 uint8_t const offOpcode = p IemCpu->offOpcode;1656 if (RT_UNLIKELY(offOpcode >= p IemCpu->cbOpcode))1657 return iemOpcodeGetNextS8SxU64Slow(p IemCpu, pu64);1658 1659 *pu64 = (int8_t)p IemCpu->abOpcode[offOpcode];1660 p IemCpu->offOpcode = offOpcode + 1;1658 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64) 1659 { 1660 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 1661 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode)) 1662 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64); 1663 1664 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode]; 1665 pVCpu->iem.s.offOpcode = offOpcode + 1; 1661 1666 return VINF_SUCCESS; 1662 1667 } … … 1670 1675 * 1671 1676 * @param a_pu64 Where to return the word. 1672 * @remark Implicitly references p IemCpu.1677 * @remark Implicitly references pVCpu. 1673 1678 */ 1674 1679 #ifndef IEM_WITH_SETJMP … … 1676 1681 do \ 1677 1682 { \ 1678 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(p IemCpu, (a_pu64)); \1683 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \ 1679 1684 if (rcStrict2 != VINF_SUCCESS) \ 1680 1685 return rcStrict2; \ 1681 1686 } while (0) 1682 1687 #else 1683 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(p IemCpu))1688 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu)) 1684 1689 #endif 1685 1690 … … 1691 1696 * 1692 1697 * @returns Strict VBox status code. 1693 * @param p IemCpu The IEM state.1698 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1694 1699 * @param pu16 Where to return the opcode word. 1695 1700 */ 1696 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(P IEMCPU pIemCpu, uint16_t *pu16)1697 { 1698 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 2);1701 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16) 1702 { 1703 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 1699 1704 if (rcStrict == VINF_SUCCESS) 1700 1705 { 1701 uint8_t offOpcode = p IemCpu->offOpcode;1702 *pu16 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1703 p IemCpu->offOpcode = offOpcode + 2;1706 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1707 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1708 pVCpu->iem.s.offOpcode = offOpcode + 2; 1704 1709 } 1705 1710 else … … 1713 1718 * 1714 1719 * @returns Strict VBox status code. 1715 * @param p IemCpu The IEM state.1720 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1716 1721 * @param pu16 Where to return the opcode word. 
1717 1722 */ 1718 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(P IEMCPU pIemCpu, uint16_t *pu16)1719 { 1720 uintptr_t const offOpcode = p IemCpu->offOpcode;1721 if (RT_LIKELY((uint8_t)offOpcode + 2 <= p IemCpu->cbOpcode))1722 { 1723 p IemCpu->offOpcode = (uint8_t)offOpcode + 2;1724 *pu16 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1723 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16) 1724 { 1725 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 1726 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode)) 1727 { 1728 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2; 1729 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1725 1730 return VINF_SUCCESS; 1726 1731 } 1727 return iemOpcodeGetNextU16Slow(p IemCpu, pu16);1732 return iemOpcodeGetNextU16Slow(pVCpu, pu16); 1728 1733 } 1729 1734 … … 1734 1739 * 1735 1740 * @returns The opcode word. 1736 * @param p IemCpu The IEM state.1737 */ 1738 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(P IEMCPU pIemCpu)1739 { 1740 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 2);1741 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1742 */ 1743 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu) 1744 { 1745 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 1741 1746 if (rcStrict == VINF_SUCCESS) 1742 1747 { 1743 uint8_t offOpcode = p IemCpu->offOpcode;1744 p IemCpu->offOpcode += 2;1745 return RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1746 } 1747 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));1748 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1749 pVCpu->iem.s.offOpcode += 2; 1750 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1751 } 1752 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 1748 1753 } 1749 1754 … … 1753 1758 * 1754 1759 * @returns The opcode word. 1755 * @param p IemCpu The IEM state.1756 */ 1757 DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(P IEMCPU pIemCpu)1758 { 1759 uintptr_t const offOpcode = p IemCpu->offOpcode;1760 if (RT_LIKELY((uint8_t)offOpcode + 2 <= p IemCpu->cbOpcode))1761 { 1762 p IemCpu->offOpcode = (uint8_t)offOpcode + 2;1763 return RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1764 } 1765 return iemOpcodeGetNextU16SlowJmp(p IemCpu);1760 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1761 */ 1762 DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu) 1763 { 1764 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 1765 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode)) 1766 { 1767 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2; 1768 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1769 } 1770 return iemOpcodeGetNextU16SlowJmp(pVCpu); 1766 1771 } 1767 1772 … … 1773 1778 * 1774 1779 * @param a_pu16 Where to return the opcode word. 1775 * @remark Implicitly references p IemCpu.1780 * @remark Implicitly references pVCpu. 
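All of these getters share the same shape: an inline fast path that reads straight from the buffered opcode bytes when offOpcode plus the operand size still fits within cbOpcode, and an out-of-line slow path that first fetches more bytes (or longjmps on failure in the setjmp build). A simplified, self-contained model of that control flow; the struct and helper names are invented for illustration:

#include <stdint.h>

/* Simplified model of the buffered opcode stream: the fast path only touches
   abOpcode when enough bytes have already been fetched; otherwise a slow
   path (stubbed out here) would read more guest memory and retry. */
typedef struct OPCODEBUF
{
    uint8_t abOpcode[16];   /* bytes fetched so far            */
    uint8_t cbOpcode;       /* number of valid bytes in buffer */
    uint8_t offOpcode;      /* current read position           */
} OPCODEBUF;

static int GetNextU16(OPCODEBUF *pBuf, uint16_t *pu16)
{
    uint8_t const off = pBuf->offOpcode;
    if (off + 2 <= pBuf->cbOpcode)          /* fast path: bytes already buffered */
    {
        pBuf->offOpcode = off + 2;
        *pu16 = (uint16_t)(pBuf->abOpcode[off] | ((uint16_t)pBuf->abOpcode[off + 1] << 8));
        return 0;
    }
    return -1;  /* slow path: fetch more bytes, then retry */
}

int main(void)
{
    OPCODEBUF Buf = { { 0x34, 0x12 }, 2, 0 };
    uint16_t  u16 = 0;
    return GetNextU16(&Buf, &u16) == 0 && u16 == 0x1234 ? 0 : 1;
}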
1776 1781 */ 1777 1782 #ifndef IEM_WITH_SETJMP … … 1779 1784 do \ 1780 1785 { \ 1781 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(p IemCpu, (a_pu16)); \1786 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \ 1782 1787 if (rcStrict2 != VINF_SUCCESS) \ 1783 1788 return rcStrict2; \ 1784 1789 } while (0) 1785 1790 #else 1786 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(p IemCpu))1791 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu)) 1787 1792 #endif 1788 1793 … … 1793 1798 * 1794 1799 * @returns Strict VBox status code. 1795 * @param p IemCpu The IEM state.1800 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1796 1801 * @param pu32 Where to return the opcode double word. 1797 1802 */ 1798 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(P IEMCPU pIemCpu, uint32_t *pu32)1799 { 1800 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 2);1803 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32) 1804 { 1805 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 1801 1806 if (rcStrict == VINF_SUCCESS) 1802 1807 { 1803 uint8_t offOpcode = p IemCpu->offOpcode;1804 *pu32 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1805 p IemCpu->offOpcode = offOpcode + 2;1808 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1809 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1810 pVCpu->iem.s.offOpcode = offOpcode + 2; 1806 1811 } 1807 1812 else … … 1815 1820 * 1816 1821 * @returns Strict VBox status code. 1817 * @param p IemCpu The IEM state.1822 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1818 1823 * @param pu32 Where to return the opcode double word. 1819 1824 */ 1820 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(P IEMCPU pIemCpu, uint32_t *pu32)1821 { 1822 uint8_t const offOpcode = p IemCpu->offOpcode;1823 if (RT_UNLIKELY(offOpcode + 2 > p IemCpu->cbOpcode))1824 return iemOpcodeGetNextU16ZxU32Slow(p IemCpu, pu32);1825 1826 *pu32 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1827 p IemCpu->offOpcode = offOpcode + 2;1825 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32) 1826 { 1827 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 1828 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode)) 1829 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32); 1830 1831 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1832 pVCpu->iem.s.offOpcode = offOpcode + 2; 1828 1833 return VINF_SUCCESS; 1829 1834 } … … 1837 1842 * 1838 1843 * @param a_pu32 Where to return the opcode double word. 1839 * @remark Implicitly references p IemCpu.1844 * @remark Implicitly references pVCpu. 1840 1845 */ 1841 1846 #ifndef IEM_WITH_SETJMP … … 1843 1848 do \ 1844 1849 { \ 1845 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(p IemCpu, (a_pu32)); \1850 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \ 1846 1851 if (rcStrict2 != VINF_SUCCESS) \ 1847 1852 return rcStrict2; \ 1848 1853 } while (0) 1849 1854 #else 1850 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(p IemCpu))1855 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu)) 1851 1856 #endif 1852 1857 … … 1857 1862 * 1858 1863 * @returns Strict VBox status code. 
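The _ZX_ variants above need no explicit masking: assigning the fetched uint16_t to a wider unsigned destination zero-extends it by definition. A tiny illustration of that behaviour:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t u16 = 0xfffe;
    uint32_t u32 = u16;   /* zero-extended: 0x0000fffe         */
    uint64_t u64 = u16;   /* zero-extended: 0x000000000000fffe */
    printf("%#x %#llx\n", u32, (unsigned long long)u64);
    return 0;
}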
1859 * @param p IemCpu The IEM state.1864 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1860 1865 * @param pu64 Where to return the opcode quad word. 1861 1866 */ 1862 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(P IEMCPU pIemCpu, uint64_t *pu64)1863 { 1864 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 2);1867 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64) 1868 { 1869 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2); 1865 1870 if (rcStrict == VINF_SUCCESS) 1866 1871 { 1867 uint8_t offOpcode = p IemCpu->offOpcode;1868 *pu64 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1869 p IemCpu->offOpcode = offOpcode + 2;1872 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1873 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1874 pVCpu->iem.s.offOpcode = offOpcode + 2; 1870 1875 } 1871 1876 else … … 1879 1884 * 1880 1885 * @returns Strict VBox status code. 1881 * @param p IemCpu The IEM state.1886 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1882 1887 * @param pu64 Where to return the opcode quad word. 1883 1888 */ 1884 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(P IEMCPU pIemCpu, uint64_t *pu64)1885 { 1886 uint8_t const offOpcode = p IemCpu->offOpcode;1887 if (RT_UNLIKELY(offOpcode + 2 > p IemCpu->cbOpcode))1888 return iemOpcodeGetNextU16ZxU64Slow(p IemCpu, pu64);1889 1890 *pu64 = RT_MAKE_U16(p IemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);1891 p IemCpu->offOpcode = offOpcode + 2;1889 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64) 1890 { 1891 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 1892 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode)) 1893 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64); 1894 1895 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]); 1896 pVCpu->iem.s.offOpcode = offOpcode + 2; 1892 1897 return VINF_SUCCESS; 1893 1898 } … … 1900 1905 * 1901 1906 * @param a_pu64 Where to return the opcode quad word. 1902 * @remark Implicitly references p IemCpu.1907 * @remark Implicitly references pVCpu. 1903 1908 */ 1904 1909 #ifndef IEM_WITH_SETJMP … … 1906 1911 do \ 1907 1912 { \ 1908 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(p IemCpu, (a_pu64)); \1913 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \ 1909 1914 if (rcStrict2 != VINF_SUCCESS) \ 1910 1915 return rcStrict2; \ 1911 1916 } while (0) 1912 1917 #else 1913 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(p IemCpu))1918 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu)) 1914 1919 #endif 1915 1920 … … 1920 1925 * 1921 1926 * @returns Strict VBox status code. 1922 * @param p IemCpu The IEM state.1927 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1923 1928 * @param pi16 Where to return the signed word. 1924 1929 */ 1925 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(P IEMCPU pIemCpu, int16_t *pi16)1926 { 1927 return iemOpcodeGetNextU16(p IemCpu, (uint16_t *)pi16);1930 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16) 1931 { 1932 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16); 1928 1933 } 1929 1934 #endif /* !IEM_WITH_SETJMP */ … … 1935 1940 * 1936 1941 * @param a_pi16 Where to return the signed word. 
1937 * @remark Implicitly references p IemCpu.1942 * @remark Implicitly references pVCpu. 1938 1943 */ 1939 1944 #ifndef IEM_WITH_SETJMP … … 1941 1946 do \ 1942 1947 { \ 1943 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(p IemCpu, (a_pi16)); \1948 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \ 1944 1949 if (rcStrict2 != VINF_SUCCESS) \ 1945 1950 return rcStrict2; \ 1946 1951 } while (0) 1947 1952 #else 1948 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(p IemCpu))1953 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu)) 1949 1954 #endif 1950 1955 … … 1955 1960 * 1956 1961 * @returns Strict VBox status code. 1957 * @param p IemCpu The IEM state.1962 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1958 1963 * @param pu32 Where to return the opcode dword. 1959 1964 */ 1960 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(P IEMCPU pIemCpu, uint32_t *pu32)1961 { 1962 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 4);1965 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32) 1966 { 1967 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 1963 1968 if (rcStrict == VINF_SUCCESS) 1964 1969 { 1965 uint8_t offOpcode = p IemCpu->offOpcode;1966 *pu32 = RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],1967 p IemCpu->abOpcode[offOpcode + 1],1968 p IemCpu->abOpcode[offOpcode + 2],1969 p IemCpu->abOpcode[offOpcode + 3]);1970 p IemCpu->offOpcode = offOpcode + 4;1970 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 1971 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 1972 pVCpu->iem.s.abOpcode[offOpcode + 1], 1973 pVCpu->iem.s.abOpcode[offOpcode + 2], 1974 pVCpu->iem.s.abOpcode[offOpcode + 3]); 1975 pVCpu->iem.s.offOpcode = offOpcode + 4; 1971 1976 } 1972 1977 else … … 1980 1985 * 1981 1986 * @returns Strict VBox status code. 1982 * @param p IemCpu The IEM state.1987 * @param pVCpu The cross context virtual CPU structure of the calling thread. 1983 1988 * @param pu32 Where to return the opcode double word. 1984 1989 */ 1985 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(P IEMCPU pIemCpu, uint32_t *pu32)1986 { 1987 uintptr_t const offOpcode = p IemCpu->offOpcode;1988 if (RT_LIKELY((uint8_t)offOpcode + 4 <= p IemCpu->cbOpcode))1989 { 1990 p IemCpu->offOpcode = (uint8_t)offOpcode + 4;1991 *pu32 = RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],1992 p IemCpu->abOpcode[offOpcode + 1],1993 p IemCpu->abOpcode[offOpcode + 2],1994 p IemCpu->abOpcode[offOpcode + 3]);1990 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32) 1991 { 1992 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 1993 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode)) 1994 { 1995 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4; 1996 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 1997 pVCpu->iem.s.abOpcode[offOpcode + 1], 1998 pVCpu->iem.s.abOpcode[offOpcode + 2], 1999 pVCpu->iem.s.abOpcode[offOpcode + 3]); 1995 2000 return VINF_SUCCESS; 1996 2001 } 1997 return iemOpcodeGetNextU32Slow(p IemCpu, pu32);2002 return iemOpcodeGetNextU32Slow(pVCpu, pu32); 1998 2003 } 1999 2004 … … 2004 2009 * 2005 2010 * @returns The opcode dword. 
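RT_MAKE_U32_FROM_U8, used by the dword getters above, combines four opcode bytes, least significant first, into the 32-bit immediate. A standalone equivalent; MakeU32FromU8 is a stand-in for the IPRT macro:

#include <stdint.h>
#include <stdio.h>

/* Little-endian assembly of a 32-bit immediate, mirroring
   RT_MAKE_U32_FROM_U8(b0, b1, b2, b3) with b0 as the least significant byte. */
static uint32_t MakeU32FromU8(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
{
    return (uint32_t)b0
         | ((uint32_t)b1 << 8)
         | ((uint32_t)b2 << 16)
         | ((uint32_t)b3 << 24);
}

int main(void)
{
    uint8_t const ab[] = { 0x78, 0x56, 0x34, 0x12 };
    printf("%#x\n", MakeU32FromU8(ab[0], ab[1], ab[2], ab[3])); /* 0x12345678 */
    return 0;
}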
2006 * @param p IemCpu The IEM state.2007 */ 2008 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(P IEMCPU pIemCpu)2009 { 2010 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 4);2011 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2012 */ 2013 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu) 2014 { 2015 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 2011 2016 if (rcStrict == VINF_SUCCESS) 2012 2017 { 2013 uint8_t offOpcode = p IemCpu->offOpcode;2014 p IemCpu->offOpcode = offOpcode + 4;2015 return RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2016 p IemCpu->abOpcode[offOpcode + 1],2017 p IemCpu->abOpcode[offOpcode + 2],2018 p IemCpu->abOpcode[offOpcode + 3]);2019 } 2020 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));2018 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2019 pVCpu->iem.s.offOpcode = offOpcode + 4; 2020 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2021 pVCpu->iem.s.abOpcode[offOpcode + 1], 2022 pVCpu->iem.s.abOpcode[offOpcode + 2], 2023 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2024 } 2025 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 2021 2026 } 2022 2027 … … 2026 2031 * 2027 2032 * @returns The opcode dword. 2028 * @param p IemCpu The IEM state.2029 */ 2030 DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(P IEMCPU pIemCpu)2031 { 2032 uintptr_t const offOpcode = p IemCpu->offOpcode;2033 if (RT_LIKELY((uint8_t)offOpcode + 4 <= p IemCpu->cbOpcode))2034 { 2035 p IemCpu->offOpcode = (uint8_t)offOpcode + 4;2036 return RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2037 p IemCpu->abOpcode[offOpcode + 1],2038 p IemCpu->abOpcode[offOpcode + 2],2039 p IemCpu->abOpcode[offOpcode + 3]);2040 } 2041 return iemOpcodeGetNextU32SlowJmp(p IemCpu);2033 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2034 */ 2035 DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu) 2036 { 2037 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 2038 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode)) 2039 { 2040 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4; 2041 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2042 pVCpu->iem.s.abOpcode[offOpcode + 1], 2043 pVCpu->iem.s.abOpcode[offOpcode + 2], 2044 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2045 } 2046 return iemOpcodeGetNextU32SlowJmp(pVCpu); 2042 2047 } 2043 2048 … … 2049 2054 * 2050 2055 * @param a_pu32 Where to return the opcode dword. 2051 * @remark Implicitly references p IemCpu.2056 * @remark Implicitly references pVCpu. 2052 2057 */ 2053 2058 #ifndef IEM_WITH_SETJMP … … 2055 2060 do \ 2056 2061 { \ 2057 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(p IemCpu, (a_pu32)); \2062 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \ 2058 2063 if (rcStrict2 != VINF_SUCCESS) \ 2059 2064 return rcStrict2; \ 2060 2065 } while (0) 2061 2066 #else 2062 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(p IemCpu))2067 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu)) 2063 2068 #endif 2064 2069 … … 2069 2074 * 2070 2075 * @returns Strict VBox status code. 2071 * @param p IemCpu The IEM state.2076 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2072 2077 * @param pu64 Where to return the opcode dword. 
2073 2078 */ 2074 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(P IEMCPU pIemCpu, uint64_t *pu64)2075 { 2076 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 4);2079 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64) 2080 { 2081 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 2077 2082 if (rcStrict == VINF_SUCCESS) 2078 2083 { 2079 uint8_t offOpcode = p IemCpu->offOpcode;2080 *pu64 = RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2081 p IemCpu->abOpcode[offOpcode + 1],2082 p IemCpu->abOpcode[offOpcode + 2],2083 p IemCpu->abOpcode[offOpcode + 3]);2084 p IemCpu->offOpcode = offOpcode + 4;2084 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2085 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2086 pVCpu->iem.s.abOpcode[offOpcode + 1], 2087 pVCpu->iem.s.abOpcode[offOpcode + 2], 2088 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2089 pVCpu->iem.s.offOpcode = offOpcode + 4; 2085 2090 } 2086 2091 else … … 2094 2099 * 2095 2100 * @returns Strict VBox status code. 2096 * @param p IemCpu The IEM state.2101 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2097 2102 * @param pu64 Where to return the opcode quad word. 2098 2103 */ 2099 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(P IEMCPU pIemCpu, uint64_t *pu64)2100 { 2101 uint8_t const offOpcode = p IemCpu->offOpcode;2102 if (RT_UNLIKELY(offOpcode + 4 > p IemCpu->cbOpcode))2103 return iemOpcodeGetNextU32ZxU64Slow(p IemCpu, pu64);2104 2105 *pu64 = RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2106 p IemCpu->abOpcode[offOpcode + 1],2107 p IemCpu->abOpcode[offOpcode + 2],2108 p IemCpu->abOpcode[offOpcode + 3]);2109 p IemCpu->offOpcode = offOpcode + 4;2104 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64) 2105 { 2106 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 2107 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode)) 2108 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64); 2109 2110 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2111 pVCpu->iem.s.abOpcode[offOpcode + 1], 2112 pVCpu->iem.s.abOpcode[offOpcode + 2], 2113 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2114 pVCpu->iem.s.offOpcode = offOpcode + 4; 2110 2115 return VINF_SUCCESS; 2111 2116 } … … 2119 2124 * 2120 2125 * @param a_pu64 Where to return the opcode quad word. 2121 * @remark Implicitly references p IemCpu.2126 * @remark Implicitly references pVCpu. 2122 2127 */ 2123 2128 #ifndef IEM_WITH_SETJMP … … 2125 2130 do \ 2126 2131 { \ 2127 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(p IemCpu, (a_pu64)); \2132 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \ 2128 2133 if (rcStrict2 != VINF_SUCCESS) \ 2129 2134 return rcStrict2; \ 2130 2135 } while (0) 2131 2136 #else 2132 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(p IemCpu))2137 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu)) 2133 2138 #endif 2134 2139 … … 2139 2144 * 2140 2145 * @returns Strict VBox status code. 2141 * @param p IemCpu The IEM state.2146 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2142 2147 * @param pi32 Where to return the signed double word. 
2143 2148 */ 2144 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(P IEMCPU pIemCpu, int32_t *pi32)2145 { 2146 return iemOpcodeGetNextU32(p IemCpu, (uint32_t *)pi32);2149 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32) 2150 { 2151 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32); 2147 2152 } 2148 2153 #endif … … 2153 2158 * 2154 2159 * @param a_pi32 Where to return the signed double word. 2155 * @remark Implicitly references p IemCpu.2160 * @remark Implicitly references pVCpu. 2156 2161 */ 2157 2162 #ifndef IEM_WITH_SETJMP … … 2159 2164 do \ 2160 2165 { \ 2161 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(p IemCpu, (a_pi32)); \2166 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \ 2162 2167 if (rcStrict2 != VINF_SUCCESS) \ 2163 2168 return rcStrict2; \ 2164 2169 } while (0) 2165 2170 #else 2166 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(p IemCpu))2171 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 2167 2172 #endif 2168 2173 … … 2173 2178 * 2174 2179 * @returns Strict VBox status code. 2175 * @param p IemCpu The IEM state.2180 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2176 2181 * @param pu64 Where to return the opcode qword. 2177 2182 */ 2178 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(P IEMCPU pIemCpu, uint64_t *pu64)2179 { 2180 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 4);2183 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64) 2184 { 2185 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4); 2181 2186 if (rcStrict == VINF_SUCCESS) 2182 2187 { 2183 uint8_t offOpcode = p IemCpu->offOpcode;2184 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2185 p IemCpu->abOpcode[offOpcode + 1],2186 p IemCpu->abOpcode[offOpcode + 2],2187 p IemCpu->abOpcode[offOpcode + 3]);2188 p IemCpu->offOpcode = offOpcode + 4;2188 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2189 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2190 pVCpu->iem.s.abOpcode[offOpcode + 1], 2191 pVCpu->iem.s.abOpcode[offOpcode + 2], 2192 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2193 pVCpu->iem.s.offOpcode = offOpcode + 4; 2189 2194 } 2190 2195 else … … 2198 2203 * 2199 2204 * @returns Strict VBox status code. 2200 * @param p IemCpu The IEM state.2205 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2201 2206 * @param pu64 Where to return the opcode quad word. 
2202 2207 */ 2203 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(P IEMCPU pIemCpu, uint64_t *pu64)2204 { 2205 uint8_t const offOpcode = p IemCpu->offOpcode;2206 if (RT_UNLIKELY(offOpcode + 4 > p IemCpu->cbOpcode))2207 return iemOpcodeGetNextS32SxU64Slow(p IemCpu, pu64);2208 2209 int32_t i32 = RT_MAKE_U32_FROM_U8(p IemCpu->abOpcode[offOpcode],2210 p IemCpu->abOpcode[offOpcode + 1],2211 p IemCpu->abOpcode[offOpcode + 2],2212 p IemCpu->abOpcode[offOpcode + 3]);2208 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64) 2209 { 2210 uint8_t const offOpcode = pVCpu->iem.s.offOpcode; 2211 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode)) 2212 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64); 2213 2214 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2215 pVCpu->iem.s.abOpcode[offOpcode + 1], 2216 pVCpu->iem.s.abOpcode[offOpcode + 2], 2217 pVCpu->iem.s.abOpcode[offOpcode + 3]); 2213 2218 *pu64 = i32; 2214 p IemCpu->offOpcode = offOpcode + 4;2219 pVCpu->iem.s.offOpcode = offOpcode + 4; 2215 2220 return VINF_SUCCESS; 2216 2221 } … … 2224 2229 * 2225 2230 * @param a_pu64 Where to return the opcode quad word. 2226 * @remark Implicitly references p IemCpu.2231 * @remark Implicitly references pVCpu. 2227 2232 */ 2228 2233 #ifndef IEM_WITH_SETJMP … … 2230 2235 do \ 2231 2236 { \ 2232 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(p IemCpu, (a_pu64)); \2237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \ 2233 2238 if (rcStrict2 != VINF_SUCCESS) \ 2234 2239 return rcStrict2; \ 2235 2240 } while (0) 2236 2241 #else 2237 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(p IemCpu))2242 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu)) 2238 2243 #endif 2239 2244 … … 2244 2249 * 2245 2250 * @returns Strict VBox status code. 2246 * @param p IemCpu The IEM state.2251 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2247 2252 * @param pu64 Where to return the opcode qword. 2248 2253 */ 2249 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(P IEMCPU pIemCpu, uint64_t *pu64)2250 { 2251 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 8);2254 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64) 2255 { 2256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8); 2252 2257 if (rcStrict == VINF_SUCCESS) 2253 2258 { 2254 uint8_t offOpcode = p IemCpu->offOpcode;2255 *pu64 = RT_MAKE_U64_FROM_U8(p IemCpu->abOpcode[offOpcode],2256 p IemCpu->abOpcode[offOpcode + 1],2257 p IemCpu->abOpcode[offOpcode + 2],2258 p IemCpu->abOpcode[offOpcode + 3],2259 p IemCpu->abOpcode[offOpcode + 4],2260 p IemCpu->abOpcode[offOpcode + 5],2261 p IemCpu->abOpcode[offOpcode + 6],2262 p IemCpu->abOpcode[offOpcode + 7]);2263 p IemCpu->offOpcode = offOpcode + 8;2259 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2260 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2261 pVCpu->iem.s.abOpcode[offOpcode + 1], 2262 pVCpu->iem.s.abOpcode[offOpcode + 2], 2263 pVCpu->iem.s.abOpcode[offOpcode + 3], 2264 pVCpu->iem.s.abOpcode[offOpcode + 4], 2265 pVCpu->iem.s.abOpcode[offOpcode + 5], 2266 pVCpu->iem.s.abOpcode[offOpcode + 6], 2267 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2268 pVCpu->iem.s.offOpcode = offOpcode + 8; 2264 2269 } 2265 2270 else … … 2273 2278 * 2274 2279 * @returns Strict VBox status code. 
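The S32-to-U64 path above stores a plain int32_t into the uint64_t destination and lets the implicit conversion perform the sign extension. A minimal standalone illustration of that:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t  i32 = INT32_MIN;             /* 0x80000000, most negative disp32 */
    uint64_t u64 = (uint64_t)(int64_t)i32;
    printf("%#llx\n", (unsigned long long)u64); /* 0xffffffff80000000 */
    return 0;
}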
2275 * @param p IemCpu The IEM state.2280 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2276 2281 * @param pu64 Where to return the opcode qword. 2277 2282 */ 2278 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(P IEMCPU pIemCpu, uint64_t *pu64)2279 { 2280 uintptr_t const offOpcode = p IemCpu->offOpcode;2281 if (RT_LIKELY((uint8_t)offOpcode + 8 <= p IemCpu->cbOpcode))2282 { 2283 *pu64 = RT_MAKE_U64_FROM_U8(p IemCpu->abOpcode[offOpcode],2284 p IemCpu->abOpcode[offOpcode + 1],2285 p IemCpu->abOpcode[offOpcode + 2],2286 p IemCpu->abOpcode[offOpcode + 3],2287 p IemCpu->abOpcode[offOpcode + 4],2288 p IemCpu->abOpcode[offOpcode + 5],2289 p IemCpu->abOpcode[offOpcode + 6],2290 p IemCpu->abOpcode[offOpcode + 7]);2291 p IemCpu->offOpcode = (uint8_t)offOpcode + 8;2283 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64) 2284 { 2285 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 2286 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode)) 2287 { 2288 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2289 pVCpu->iem.s.abOpcode[offOpcode + 1], 2290 pVCpu->iem.s.abOpcode[offOpcode + 2], 2291 pVCpu->iem.s.abOpcode[offOpcode + 3], 2292 pVCpu->iem.s.abOpcode[offOpcode + 4], 2293 pVCpu->iem.s.abOpcode[offOpcode + 5], 2294 pVCpu->iem.s.abOpcode[offOpcode + 6], 2295 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2296 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8; 2292 2297 return VINF_SUCCESS; 2293 2298 } 2294 return iemOpcodeGetNextU64Slow(p IemCpu, pu64);2299 return iemOpcodeGetNextU64Slow(pVCpu, pu64); 2295 2300 } 2296 2301 … … 2301 2306 * 2302 2307 * @returns The opcode qword. 2303 * @param p IemCpu The IEM state.2304 */ 2305 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(P IEMCPU pIemCpu)2306 { 2307 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(p IemCpu, 8);2308 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2309 */ 2310 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu) 2311 { 2312 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8); 2308 2313 if (rcStrict == VINF_SUCCESS) 2309 2314 { 2310 uint8_t offOpcode = p IemCpu->offOpcode;2311 p IemCpu->offOpcode = offOpcode + 8;2312 return RT_MAKE_U64_FROM_U8(p IemCpu->abOpcode[offOpcode],2313 p IemCpu->abOpcode[offOpcode + 1],2314 p IemCpu->abOpcode[offOpcode + 2],2315 p IemCpu->abOpcode[offOpcode + 3],2316 p IemCpu->abOpcode[offOpcode + 4],2317 p IemCpu->abOpcode[offOpcode + 5],2318 p IemCpu->abOpcode[offOpcode + 6],2319 p IemCpu->abOpcode[offOpcode + 7]);2320 } 2321 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));2315 uint8_t offOpcode = pVCpu->iem.s.offOpcode; 2316 pVCpu->iem.s.offOpcode = offOpcode + 8; 2317 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2318 pVCpu->iem.s.abOpcode[offOpcode + 1], 2319 pVCpu->iem.s.abOpcode[offOpcode + 2], 2320 pVCpu->iem.s.abOpcode[offOpcode + 3], 2321 pVCpu->iem.s.abOpcode[offOpcode + 4], 2322 pVCpu->iem.s.abOpcode[offOpcode + 5], 2323 pVCpu->iem.s.abOpcode[offOpcode + 6], 2324 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2325 } 2326 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 2322 2327 } 2323 2328 … … 2327 2332 * 2328 2333 * @returns The opcode qword. 
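The qword getter assembles eight consecutive opcode bytes, again least significant first. A loop-based standalone equivalent of RT_MAKE_U64_FROM_U8; the helper name is made up for this sketch:

#include <stdint.h>
#include <stdio.h>

/* Little-endian assembly of a 64-bit immediate from eight opcode bytes,
   equivalent to RT_MAKE_U64_FROM_U8(b0, ..., b7) with b0 least significant. */
static uint64_t MakeU64FromBytes(const uint8_t *pb)
{
    uint64_t u64 = 0;
    for (unsigned i = 0; i < 8; i++)
        u64 |= (uint64_t)pb[i] << (8 * i);
    return u64;
}

int main(void)
{
    uint8_t const ab[8] = { 0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01 };
    printf("%#llx\n", (unsigned long long)MakeU64FromBytes(ab)); /* 0x123456789abcdef */
    return 0;
}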
2329 * @param p IemCpu The IEM state.2330 */ 2331 DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(P IEMCPU pIemCpu)2332 { 2333 uintptr_t const offOpcode = p IemCpu->offOpcode;2334 if (RT_LIKELY((uint8_t)offOpcode + 8 <= p IemCpu->cbOpcode))2335 { 2336 p IemCpu->offOpcode = (uint8_t)offOpcode + 8;2337 return RT_MAKE_U64_FROM_U8(p IemCpu->abOpcode[offOpcode],2338 p IemCpu->abOpcode[offOpcode + 1],2339 p IemCpu->abOpcode[offOpcode + 2],2340 p IemCpu->abOpcode[offOpcode + 3],2341 p IemCpu->abOpcode[offOpcode + 4],2342 p IemCpu->abOpcode[offOpcode + 5],2343 p IemCpu->abOpcode[offOpcode + 6],2344 p IemCpu->abOpcode[offOpcode + 7]);2345 } 2346 return iemOpcodeGetNextU64SlowJmp(p IemCpu);2334 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2335 */ 2336 DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu) 2337 { 2338 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode; 2339 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode)) 2340 { 2341 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8; 2342 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode], 2343 pVCpu->iem.s.abOpcode[offOpcode + 1], 2344 pVCpu->iem.s.abOpcode[offOpcode + 2], 2345 pVCpu->iem.s.abOpcode[offOpcode + 3], 2346 pVCpu->iem.s.abOpcode[offOpcode + 4], 2347 pVCpu->iem.s.abOpcode[offOpcode + 5], 2348 pVCpu->iem.s.abOpcode[offOpcode + 6], 2349 pVCpu->iem.s.abOpcode[offOpcode + 7]); 2350 } 2351 return iemOpcodeGetNextU64SlowJmp(pVCpu); 2347 2352 } 2348 2353 … … 2353 2358 * 2354 2359 * @param a_pu64 Where to return the opcode quad word. 2355 * @remark Implicitly references p IemCpu.2360 * @remark Implicitly references pVCpu. 2356 2361 */ 2357 2362 #ifndef IEM_WITH_SETJMP … … 2359 2364 do \ 2360 2365 { \ 2361 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(p IemCpu, (a_pu64)); \2366 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \ 2362 2367 if (rcStrict2 != VINF_SUCCESS) \ 2363 2368 return rcStrict2; \ 2364 2369 } while (0) 2365 2370 #else 2366 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(p IemCpu) )2371 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) ) 2367 2372 #endif 2368 2373 … … 2377 2382 * 2378 2383 * @returns VBox strict status code. 2379 * @param pIemCpu The IEM per CPU instance data. 2384 * @param pVCpu The cross context virtual CPU structure of the 2385 * calling thread. 2380 2386 * @param pCtx The CPU context. 2381 2387 * @param NewSS The new SS selctor. … … 2383 2389 * @param pDesc Where to return the descriptor. 2384 2390 */ 2385 IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(P IEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)2391 IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) 2386 2392 { 2387 2393 NOREF(pCtx); … … 2392 2398 { 2393 2399 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS)); 2394 return iemRaiseTaskSwitchFault0(p IemCpu);2400 return iemRaiseTaskSwitchFault0(pVCpu); 2395 2401 } 2396 2402 … … 2399 2405 { 2400 2406 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl)); 2401 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, NewSS);2407 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS); 2402 2408 } 2403 2409 … … 2405 2411 * Read the descriptor. 
2406 2412 */ 2407 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, pDesc, NewSS, X86_XCPT_TS);2413 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS); 2408 2414 if (rcStrict != VINF_SUCCESS) 2409 2415 return rcStrict; … … 2415 2421 { 2416 2422 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type)); 2417 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, NewSS);2423 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS); 2418 2424 } 2419 2425 … … 2422 2428 { 2423 2429 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type)); 2424 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, NewSS);2430 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS); 2425 2431 } 2426 2432 if (pDesc->Legacy.Gen.u2Dpl != uCpl) 2427 2433 { 2428 2434 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl)); 2429 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, NewSS);2435 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS); 2430 2436 } 2431 2437 … … 2435 2441 { 2436 2442 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS)); 2437 return iemRaiseSelectorNotPresentBySelector(p IemCpu, NewSS);2443 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS); 2438 2444 } 2439 2445 … … 2446 2452 * not. 2447 2453 * 2448 * @param a_p IemCpu The IEM per CPU data.2449 * @param a_pCtx 2454 * @param a_pVCpu The cross context virtual CPU structure of the calling thread. 2455 * @param a_pCtx The CPU context. 2450 2456 */ 2451 2457 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 2452 # define IEMMISC_GET_EFL(a_p IemCpu, a_pCtx) \2453 ( IEM_VERIFICATION_ENABLED(a_p IemCpu) \2458 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \ 2459 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \ 2454 2460 ? (a_pCtx)->eflags.u \ 2455 : CPUMRawGetEFlags( IEMCPU_TO_VMCPU(a_pIemCpu)) )2461 : CPUMRawGetEFlags(a_pVCpu) ) 2456 2462 #else 2457 # define IEMMISC_GET_EFL(a_p IemCpu, a_pCtx) \2463 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \ 2458 2464 ( (a_pCtx)->eflags.u ) 2459 2465 #endif … … 2462 2468 * Updates the EFLAGS in the correct manner wrt. PATM. 2463 2469 * 2464 * @param a_p IemCpu The IEM per CPU data.2465 * @param a_pCtx 2466 * @param a_fEfl 2470 * @param a_pVCpu The cross context virtual CPU structure of the calling thread. 2471 * @param a_pCtx The CPU context. 2472 * @param a_fEfl The new EFLAGS. 2467 2473 */ 2468 2474 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 2469 # define IEMMISC_SET_EFL(a_p IemCpu, a_pCtx, a_fEfl) \2475 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \ 2470 2476 do { \ 2471 if (IEM_VERIFICATION_ENABLED(a_p IemCpu)) \2477 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \ 2472 2478 (a_pCtx)->eflags.u = (a_fEfl); \ 2473 2479 else \ 2474 CPUMRawSetEFlags( IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \2480 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \ 2475 2481 } while (0) 2476 2482 #else 2477 # define IEMMISC_SET_EFL(a_p IemCpu, a_pCtx, a_fEfl) \2483 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \ 2478 2484 do { \ 2479 2485 (a_pCtx)->eflags.u = (a_fEfl); \ … … 2513 2519 * 2514 2520 * @returns VBox strict status code. 2515 * @param p IemCpu The IEM per CPU instance data.2521 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2516 2522 * @param pCtx The CPU context. 2517 2523 * @param uCpl The CPL to load the stack for. … … 2519 2525 * @param puEsp Where to return the new stack pointer. 
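Distilled, the new-SS validation above enforces: the selector must not be null, its RPL must equal the new CPL, the descriptor must be a writable data segment whose DPL equals the new CPL, and it must be present (a missing P bit raises #NP rather than #TS). A simplified sketch of those rules with invented, flattened field names:

#include <stdint.h>
#include <stdbool.h>

/* Simplified version of the checks performed on a new SS. */
typedef struct SELDESC
{
    bool    fSystem;    /* system descriptor (u1DescType == 0) */
    bool    fCode;      /* code segment                        */
    bool    fWritable;  /* writable data segment               */
    bool    fPresent;   /* P bit                               */
    uint8_t uDpl;       /* descriptor privilege level          */
} SELDESC;

typedef enum { SS_OK, SS_TS_FAULT, SS_NP_FAULT } SSCHECK;

static SSCHECK ValidateNewSS(uint16_t uSel, uint8_t uCpl, const SELDESC *pDesc)
{
    if (!(uSel & 0xfff8u))           return SS_TS_FAULT;  /* null selector      */
    if ((uSel & 0x3) != uCpl)        return SS_TS_FAULT;  /* RPL must equal CPL */
    if (pDesc->fSystem || pDesc->fCode || !pDesc->fWritable)
                                     return SS_TS_FAULT;  /* need writable data */
    if (pDesc->uDpl != uCpl)         return SS_TS_FAULT;  /* DPL must equal CPL */
    if (!pDesc->fPresent)            return SS_NP_FAULT;  /* #NP, not #TS       */
    return SS_OK;
}

int main(void)
{
    SELDESC Ss = { false, false, true, true, 0 };
    return ValidateNewSS(0x10, 0, &Ss) == SS_OK ? 0 : 1;
}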
2520 2526 */ 2521 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(P IEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,2527 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, 2522 2528 PRTSEL pSelSS, uint32_t *puEsp) 2523 2529 { … … 2538 2544 /** @todo check actual access pattern here. */ 2539 2545 uint32_t u32Tmp = 0; /* gcc maybe... */ 2540 rcStrict = iemMemFetchSysU32(p IemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);2546 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off); 2541 2547 if (rcStrict == VINF_SUCCESS) 2542 2548 { … … 2549 2555 { 2550 2556 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit)); 2551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(p IemCpu);2557 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu); 2552 2558 } 2553 2559 break; … … 2565 2571 /** @todo check actual access pattern here. */ 2566 2572 uint64_t u64Tmp; 2567 rcStrict = iemMemFetchSysU64(p IemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);2573 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off); 2568 2574 if (rcStrict == VINF_SUCCESS) 2569 2575 { … … 2576 2582 { 2577 2583 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit)); 2578 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(p IemCpu);2584 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu); 2579 2585 } 2580 2586 break; … … 2597 2603 * 2598 2604 * @returns VBox strict status code. 2599 * @param p IemCpu The IEM per CPU instance data.2605 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2600 2606 * @param pCtx The CPU context. 2601 2607 * @param uCpl The CPL to load the stack for. … … 2603 2609 * @param puRsp Where to return the new stack pointer. 2604 2610 */ 2605 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(P IEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)2611 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) 2606 2612 { 2607 2613 Assert(uCpl < 4); … … 2619 2625 { 2620 2626 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit)); 2621 return iemRaiseTaskSwitchFaultCurrentTSS(p IemCpu);2622 } 2623 2624 return iemMemFetchSysU64(p IemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);2627 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu); 2628 } 2629 2630 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off); 2625 2631 } 2626 2632 … … 2648 2654 * 2649 2655 * @returns VBox strict status code. 2650 * @param p IemCpu The IEM per CPU instance data.2656 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2651 2657 * @param pCtx The CPU context. 
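The offsets used above follow directly from the TSS layouts: a 32-bit TSS keeps an ESPn/SSn pair per ring starting at byte 4, a 16-bit TSS keeps an SPn/SSn pair starting at byte 2, and the 64-bit TSS supplies either RSPn or one of the IST slots. A small sketch of those offset computations, written from the architectural layouts rather than copied from the code:

#include <stdint.h>
#include <stdio.h>

/* Byte offsets of the privileged stack pointers inside the three TSS formats. */
static uint32_t OffEspInTss32(uint8_t uCpl) { return 4 + uCpl * 8u; }          /* ESPn, SSn at +4 */
static uint32_t OffSpInTss16(uint8_t uCpl)  { return 2 + uCpl * 4u; }          /* SPn,  SSn at +2 */
static uint32_t OffRspInTss64(uint8_t uCpl) { return 4 + uCpl * 8u; }          /* RSP0..RSP2      */
static uint32_t OffIstInTss64(uint8_t uIst) { return 0x24 + (uIst - 1u) * 8u; }/* IST1..IST7      */

int main(void)
{
    printf("TSS32 ESP0 at %#x, TSS16 SP2 at %#x, TSS64 RSP1 at %#x, TSS64 IST1 at %#x\n",
           OffEspInTss32(0), OffSpInTss16(2), OffRspInTss64(1), OffIstInTss64(1));
    return 0;
}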
2652 2658 * @param cbInstr The number of bytes to offset rIP by in the return … … 2658 2664 */ 2659 2665 IEM_STATIC VBOXSTRICTRC 2660 iemRaiseXcptOrIntInRealMode(P IEMCPU pIemCpu,2666 iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu, 2661 2667 PCPUMCTX pCtx, 2662 2668 uint8_t cbInstr, … … 2666 2672 uint64_t uCr2) 2667 2673 { 2668 AssertReturn(p IemCpu->enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);2674 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6); 2669 2675 NOREF(uErr); NOREF(uCr2); 2670 2676 … … 2675 2681 { 2676 2682 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt)); 2677 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));2683 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 2678 2684 } 2679 2685 RTFAR16 Idte; 2680 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, 2681 pCtx->idtr.pIdt + UINT32_C(4) * u8Vector); 2686 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector); 2682 2687 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 2683 2688 return rcStrict; … … 2688 2693 uint16_t *pu16Frame; 2689 2694 uint64_t uNewRsp; 2690 rcStrict = iemMemStackPushBeginSpecial(p IemCpu, 6, (void **)&pu16Frame, &uNewRsp);2695 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp); 2691 2696 if (rcStrict != VINF_SUCCESS) 2692 2697 return rcStrict; 2693 2698 2694 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);2699 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 2695 2700 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC 2696 2701 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186); 2697 if (p IemCpu->uTargetCpu <= IEMTARGETCPU_186)2702 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186) 2698 2703 fEfl |= UINT16_C(0xf000); 2699 2704 #endif … … 2701 2706 pu16Frame[1] = (uint16_t)pCtx->cs.Sel; 2702 2707 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip; 2703 rcStrict = iemMemStackPushCommitSpecial(p IemCpu, pu16Frame, uNewRsp);2708 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp); 2704 2709 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 2705 2710 return rcStrict; … … 2716 2721 pCtx->rip = Idte.off; 2717 2722 fEfl &= ~X86_EFL_IF; 2718 IEMMISC_SET_EFL(p IemCpu, pCtx, fEfl);2723 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 2719 2724 2720 2725 /** @todo do we actually do this in real mode? */ … … 2729 2734 * Loads a NULL data selector into when coming from V8086 mode. 2730 2735 * 2731 * @param p IemCpu The IEM per CPU instance data.2736 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2732 2737 * @param pSReg Pointer to the segment register. 2733 2738 */ 2734 IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(P IEMCPU pIemCpu, PCPUMSELREG pSReg)2739 IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg) 2735 2740 { 2736 2741 pSReg->Sel = 0; 2737 2742 pSReg->ValidSel = 0; 2738 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))2743 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 2739 2744 { 2740 2745 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */ … … 2755 2760 * Loads a segment selector during a task switch in V8086 mode. 
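In real mode the IDTR normally describes the interrupt vector table, and each vector occupies four bytes: a 16-bit offset followed by a 16-bit segment. The handler is entered after FLAGS, CS and IP have been pushed and IF/TF cleared, as the routine above does. A minimal sketch of the vector lookup only, illustrative and independent of the IEM memory access path:

#include <stdint.h>
#include <stdio.h>

/* Real-mode IVT entry: 16-bit offset then 16-bit segment, 4 bytes per vector. */
typedef struct IVTENTRY
{
    uint16_t off;
    uint16_t sel;
} IVTENTRY;

static IVTENTRY ReadIvtEntry(const uint8_t *pbIvt, uint8_t bVector)
{
    IVTENTRY Entry;
    Entry.off = (uint16_t)(pbIvt[4u * bVector + 0] | (pbIvt[4u * bVector + 1] << 8));
    Entry.sel = (uint16_t)(pbIvt[4u * bVector + 2] | (pbIvt[4u * bVector + 3] << 8));
    return Entry;
}

int main(void)
{
    uint8_t abIvt[1024] = { 0 };
    abIvt[0x21 * 4 + 0] = 0x34;  /* INT 21h handler at offset 1234h... */
    abIvt[0x21 * 4 + 1] = 0x12;
    abIvt[0x21 * 4 + 2] = 0x00;  /* ...in segment F000h               */
    abIvt[0x21 * 4 + 3] = 0xf0;
    IVTENTRY E = ReadIvtEntry(abIvt, 0x21);
    printf("INT 21h -> %04x:%04x\n", E.sel, E.off);  /* f000:1234 */
    return 0;
}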
2756 2761 * 2757 * @param p IemCpu The IEM per CPU instance data.2762 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2758 2763 * @param pSReg Pointer to the segment register. 2759 2764 * @param uSel The selector value to load. 2760 2765 */ 2761 IEM_STATIC void iemHlpLoadSelectorInV86Mode(P IEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)2766 IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel) 2762 2767 { 2763 2768 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */ … … 2775 2780 * visible parts, in protected mode. 2776 2781 * 2777 * @param p IemCpu The IEM state of the calling EMT.2782 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2778 2783 * @param pSReg Pointer to the segment register. 2779 2784 * @param uRpl The RPL. 2780 2785 */ 2781 IEM_STATIC void iemHlpLoadNullDataSelectorProt(P IEMCPU pIemCpu, PCPUMSELREG pSReg, RTSEL uRpl)2786 IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) 2782 2787 { 2783 2788 /** @todo Testcase: write a testcase checking what happends when loading a NULL … … 2786 2791 pSReg->ValidSel = uRpl; 2787 2792 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 2788 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))2793 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 2789 2794 { 2790 2795 /* VT-x (Intel 3960x) observed doing something like this. */ 2791 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (p IemCpu->uCpl << X86DESCATTR_DPL_SHIFT);2796 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT); 2792 2797 pSReg->u32Limit = UINT32_MAX; 2793 2798 pSReg->u64Base = 0; … … 2809 2814 * 2810 2815 * @returns VBox strict status code. 2811 * @param p IemCpu The IEM per CPU instance data.2816 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2812 2817 * @param pSReg Pointer to the segment register. 2813 2818 * @param uSel The new selector value. 2814 2819 * 2815 2820 * @remarks This does _not_ handle CS or SS. 2816 * @remarks This expects p IemCpu->uCpl to be up to date.2817 */ 2818 IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(P IEMCPU pIemCpu, PCPUMSELREG pSReg, uint16_t uSel)2819 { 2820 Assert(p IemCpu->enmCpuMode != IEMMODE_64BIT);2821 * @remarks This expects pVCpu->iem.s.uCpl to be up to date. 2822 */ 2823 IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel) 2824 { 2825 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 2821 2826 2822 2827 /* Null data selector. */ 2823 2828 if (!(uSel & X86_SEL_MASK_OFF_RPL)) 2824 2829 { 2825 iemHlpLoadNullDataSelectorProt(p IemCpu, pSReg, uSel);2826 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg));2827 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);2830 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel); 2831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 2832 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 2828 2833 return VINF_SUCCESS; 2829 2834 } … … 2831 2836 /* Fetch the descriptor. 
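For reference, loading a segment register in virtual-8086 mode follows real-mode addressing semantics: the hidden base is the selector shifted left by four, the limit is 0xFFFF, and the attributes describe a present, ring-3, 16-bit segment. A standalone sketch of the hidden-part values that implies; these are architectural facts, not a copy of the elided function body:

#include <stdint.h>
#include <stdio.h>

/* Hidden segment register parts implied by a selector load in V8086 mode. */
typedef struct V86SEGHID
{
    uint32_t u32Base;   /* selector << 4        */
    uint32_t u32Limit;  /* always 0xffff        */
    uint8_t  uDpl;      /* always 3 in V86 mode */
} V86SEGHID;

static V86SEGHID V86SegFromSelector(uint16_t uSel)
{
    V86SEGHID Hid;
    Hid.u32Base  = (uint32_t)uSel << 4;
    Hid.u32Limit = 0xffff;
    Hid.uDpl     = 3;
    return Hid;
}

int main(void)
{
    V86SEGHID Hid = V86SegFromSelector(0xb800);   /* text-mode video segment */
    printf("base=%#x limit=%#x dpl=%u\n", Hid.u32Base, Hid.u32Limit, (unsigned)Hid.uDpl); /* base=0xb8000 */
    return 0;
}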
*/ 2832 2837 IEMSELDESC Desc; 2833 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uSel, X86_XCPT_TS);2838 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS); 2834 2839 if (rcStrict != VINF_SUCCESS) 2835 2840 { … … 2845 2850 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel, 2846 2851 Desc.Legacy.Gen.u4Type)); 2847 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);2852 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 2848 2853 } 2849 2854 … … 2854 2859 /* The RPL and the new CPL must be less than or equal to the DPL. */ 2855 2860 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl 2856 || (p IemCpu->uCpl > Desc.Legacy.Gen.u2Dpl))2861 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl)) 2857 2862 { 2858 2863 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n", 2859 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));2860 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);2864 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 2865 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 2861 2866 } 2862 2867 } … … 2866 2871 { 2867 2872 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel)); 2868 return iemRaiseSelectorNotPresentWithErr(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);2873 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 2869 2874 } 2870 2875 … … 2879 2884 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2880 2885 { 2881 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uSel);2886 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel); 2882 2887 if (rcStrict != VINF_SUCCESS) 2883 2888 return rcStrict; … … 2892 2897 pSReg->ValidSel = uSel; 2893 2898 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 2894 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))2899 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 2895 2900 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE; 2896 2901 2897 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg));2898 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);2902 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 2903 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 2899 2904 return VINF_SUCCESS; 2900 2905 } … … 2914 2919 * 2915 2920 * @returns VBox strict status code. 2916 * @param p IemCpu The IEM per CPU instance data.2921 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2917 2922 * @param pCtx The CPU context. 2918 2923 * @param enmTaskSwitch What caused this task switch. … … 2925 2930 */ 2926 2931 IEM_STATIC VBOXSTRICTRC 2927 iemTaskSwitch(P IEMCPU pIemCpu,2932 iemTaskSwitch(PVMCPU pVCpu, 2928 2933 PCPUMCTX pCtx, 2929 2934 IEMTASKSWITCH enmTaskSwitch, … … 2935 2940 PIEMSELDESC pNewDescTSS) 2936 2941 { 2937 Assert(!IEM_IS_REAL_MODE(p IemCpu));2938 Assert(p IemCpu->enmCpuMode != IEMMODE_64BIT);2942 Assert(!IEM_IS_REAL_MODE(pVCpu)); 2943 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 2939 2944 2940 2945 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type; … … 2966 2971 Log(("iemTaskSwitch: Invalid new TSS limit. 
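The limit checks in the task-switch path above enforce the architectural minimum TSS sizes: a 16-bit TSS must be at least 44 bytes (limit 0x2B) and a 32-bit TSS at least 104 bytes (limit 0x67); anything smaller raises #TS. A small sketch of that check, with the constants taken from the architecture manuals rather than from the elided code:

#include <stdint.h>
#include <stdbool.h>

#define TSS16_LIMIT_MIN 0x2b   /* 44-byte 16-bit TSS  */
#define TSS32_LIMIT_MIN 0x67   /* 104-byte 32-bit TSS */

/* Returns true if the new TSS is large enough for its type, false -> #TS. */
static bool IsNewTssLimitOk(bool fIs386Tss, uint32_t uLimit)
{
    return uLimit >= (fIs386Tss ? (uint32_t)TSS32_LIMIT_MIN : (uint32_t)TSS16_LIMIT_MIN);
}

int main(void)
{
    return IsNewTssLimitOk(true, 0x67) && !IsNewTssLimitOk(true, 0x66) ? 0 : 1;
}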
enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n", 2967 2972 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin)); 2968 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);2973 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL); 2969 2974 } 2970 2975 … … 2983 2988 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n", 2984 2989 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin)); 2985 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, SelTSS & X86_SEL_MASK_OFF_RPL);2990 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL); 2986 2991 } 2987 2992 … … 2997 3002 * not perform correct translation if this happens. See Intel spec. 7.2.1 2998 3003 * "Task-State Segment" */ 2999 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);3004 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW); 3000 3005 if (rcStrict != VINF_SUCCESS) 3001 3006 { … … 3013 3018 { 3014 3019 PX86DESC pDescCurTSS; 3015 rcStrict = iemMemMap(p IemCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,3020 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX, 3016 3021 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW); 3017 3022 if (rcStrict != VINF_SUCCESS) … … 3023 3028 3024 3029 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 3025 rcStrict = iemMemCommitAndUnmap(p IemCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);3030 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW); 3026 3031 if (rcStrict != VINF_SUCCESS) 3027 3032 { … … 3060 3065 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip); 3061 3066 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64); 3062 rcStrict = iemMemMap(p IemCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);3067 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW); 3063 3068 if (rcStrict != VINF_SUCCESS) 3064 3069 { … … 3087 3092 pCurTSS32->gs = pCtx->gs.Sel; 3088 3093 3089 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);3094 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW); 3090 3095 if (rcStrict != VINF_SUCCESS) 3091 3096 { … … 3104 3109 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip); 3105 3110 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28); 3106 rcStrict = iemMemMap(p IemCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);3111 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW); 3107 3112 if (rcStrict != VINF_SUCCESS) 3108 3113 { … … 3129 3134 pCurTSS16->ds = pCtx->ds.Sel; 3130 3135 3131 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);3136 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW); 3132 3137 if (rcStrict != VINF_SUCCESS) 3133 3138 { … … 3210 3215 * We're done accessing the new TSS. 
3211 3216 */ 3212 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvNewTSS, IEM_ACCESS_SYS_RW);3217 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW); 3213 3218 if (rcStrict != VINF_SUCCESS) 3214 3219 { … … 3222 3227 if (enmTaskSwitch != IEMTASKSWITCH_IRET) 3223 3228 { 3224 rcStrict = iemMemMap(p IemCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,3229 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX, 3225 3230 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW); 3226 3231 if (rcStrict != VINF_SUCCESS) … … 3237 3242 3238 3243 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 3239 rcStrict = iemMemCommitAndUnmap(p IemCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);3244 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW); 3240 3245 if (rcStrict != VINF_SUCCESS) 3241 3246 { … … 3256 3261 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy); 3257 3262 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy); 3258 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_TR);3263 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR); 3259 3264 3260 3265 /* Set the busy bit in TR. */ … … 3269 3274 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */ 3270 3275 pCtx->cr0 |= X86_CR0_TS; 3271 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR0);3276 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0); 3272 3277 3273 3278 pCtx->eip = uNewEip; … … 3283 3288 uNewEflags &= X86_EFL_LIVE_MASK; 3284 3289 uNewEflags |= X86_EFL_RA1_MASK; 3285 IEMMISC_SET_EFL(p IemCpu, pCtx, uNewEflags);3290 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags); 3286 3291 3287 3292 /* … … 3313 3318 pCtx->gs.fFlags = CPUMSELREG_FLAGS_STALE; 3314 3319 pCtx->gs.Attr.u &= ~X86DESCATTR_P; 3315 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);3320 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 3316 3321 3317 3322 pCtx->ldtr.Sel = uNewLdt; 3318 3323 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE; 3319 3324 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P; 3320 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_LDTR);3321 3322 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))3325 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR); 3326 3327 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 3323 3328 { 3324 3329 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE; … … 3338 3343 { 3339 3344 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */ 3340 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))3345 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 3341 3346 { 3342 int rc = CPUMSetGuestCR3( IEMCPU_TO_VMCPU(pIemCpu), uNewCr3);3347 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3); 3343 3348 AssertRCSuccessReturn(rc, rc); 3344 3349 } … … 3347 3352 3348 3353 /* Inform PGM. 
*/ 3349 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))3354 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 3350 3355 { 3351 int rc = PGMFlushTLB( IEMCPU_TO_VMCPU(pIemCpu), pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));3356 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 3352 3357 AssertRCReturn(rc, rc); 3353 3358 /* ignore informational status codes */ 3354 3359 } 3355 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_CR3);3360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3); 3356 3361 } 3357 3362 … … 3360 3365 */ 3361 3366 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL)) 3362 iemHlpLoadNullDataSelectorProt(p IemCpu, &pCtx->ldtr, uNewLdt);3367 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt); 3363 3368 else 3364 3369 { … … 3366 3371 3367 3372 IEMSELDESC DescNewLdt; 3368 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);3373 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS); 3369 3374 if (rcStrict != VINF_SUCCESS) 3370 3375 { … … 3379 3384 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch, 3380 3385 uNewLdt, DescNewLdt.Legacy.u)); 3381 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);3386 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 3382 3387 } 3383 3388 … … 3387 3392 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy); 3388 3393 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy); 3389 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))3394 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 3390 3395 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE; 3391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ldtr));3396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr)); 3392 3397 } 3393 3398 3394 3399 IEMSELDESC DescSS; 3395 if (IEM_IS_V86_MODE(p IemCpu))3396 { 3397 p IemCpu->uCpl = 3;3398 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->es, uNewES);3399 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->cs, uNewCS);3400 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->ss, uNewSS);3401 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->ds, uNewDS);3402 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->fs, uNewFS);3403 iemHlpLoadSelectorInV86Mode(p IemCpu, &pCtx->gs, uNewGS);3400 if (IEM_IS_V86_MODE(pVCpu)) 3401 { 3402 pVCpu->iem.s.uCpl = 3; 3403 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES); 3404 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS); 3405 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS); 3406 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS); 3407 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS); 3408 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS); 3404 3409 } 3405 3410 else … … 3413 3418 { 3414 3419 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS)); 3415 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3420 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 3416 3421 } 3417 3422 3418 3423 /* Fetch the descriptor. */ 3419 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSS, uNewSS, X86_XCPT_TS);3424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS); 3420 3425 if (rcStrict != VINF_SUCCESS) 3421 3426 { … … 3432 3437 Log(("iemTaskSwitch: SS invalid descriptor type. 
uNewSS=%#x u1DescType=%u u4Type=%#x\n", 3433 3438 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type)); 3434 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3439 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 3435 3440 } 3436 3441 … … 3441 3446 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl, 3442 3447 uNewCpl)); 3443 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3448 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 3444 3449 } 3445 3450 … … 3448 3453 { 3449 3454 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS)); 3450 return iemRaiseSelectorNotPresentWithErr(p IemCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3455 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL); 3451 3456 } 3452 3457 … … 3457 3462 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3458 3463 { 3459 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewSS);3464 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS); 3460 3465 if (rcStrict != VINF_SUCCESS) 3461 3466 return rcStrict; … … 3470 3475 pCtx->ss.u64Base = u64Base; 3471 3476 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 3472 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));3477 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); 3473 3478 3474 3479 /* CPL has changed, update IEM before loading rest of segments. */ 3475 p IemCpu->uCpl = uNewCpl;3480 pVCpu->iem.s.uCpl = uNewCpl; 3476 3481 3477 3482 /* 3478 3483 * Load the data segments for the new task. 3479 3484 */ 3480 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(p IemCpu, &pCtx->es, uNewES);3485 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES); 3481 3486 if (rcStrict != VINF_SUCCESS) 3482 3487 return rcStrict; 3483 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(p IemCpu, &pCtx->ds, uNewDS);3488 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS); 3484 3489 if (rcStrict != VINF_SUCCESS) 3485 3490 return rcStrict; 3486 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(p IemCpu, &pCtx->fs, uNewFS);3491 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS); 3487 3492 if (rcStrict != VINF_SUCCESS) 3488 3493 return rcStrict; 3489 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(p IemCpu, &pCtx->gs, uNewGS);3494 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS); 3490 3495 if (rcStrict != VINF_SUCCESS) 3491 3496 return rcStrict; … … 3497 3502 { 3498 3503 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS)); 3499 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3504 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3500 3505 } 3501 3506 3502 3507 /* Fetch the descriptor. */ 3503 3508 IEMSELDESC DescCS; 3504 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, uNewCS, X86_XCPT_TS);3509 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS); 3505 3510 if (rcStrict != VINF_SUCCESS) 3506 3511 { … … 3515 3520 Log(("iemTaskSwitch: CS invalid descriptor type. 
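The SS checks above follow the usual stack-segment rules (a hedged summary, since the exact type test is partly elided in this hunk): SS must be a writable data segment, its DPL must equal the new CPL, and it must be present; only after SS is loaded does the code commit pVCpu->iem.s.uCpl so the remaining data segment loads run at the new privilege level. A simplified predicate:

#include <stdbool.h>
#include <stdint.h>

#define MY_SEL_TYPE_CODE   0x8   /* bit 3 of the code/data type = code segment */
#define MY_SEL_TYPE_WRITE  0x2   /* data segment: writable bit */

static bool myTaskSwitchSsOk(uint8_t u1DescType, uint8_t u4Type,
                             uint8_t uDpl, uint8_t u1Present, uint8_t uNewCpl)
{
    return u1DescType == 1                  /* code/data descriptor, not system */
        && !(u4Type & MY_SEL_TYPE_CODE)     /* data, not code */
        && (u4Type & MY_SEL_TYPE_WRITE)     /* writable */
        && uDpl == uNewCpl                  /* DPL must match the new CPL */
        && u1Present == 1;
}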
uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS, 3516 3521 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); 3517 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3522 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3518 3523 } 3519 3524 … … 3524 3529 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type, 3525 3530 DescCS.Legacy.Gen.u2Dpl)); 3526 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3527 3532 } 3528 3533 … … 3533 3538 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, 3534 3539 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl)); 3535 return iemRaiseTaskSwitchFaultWithErr(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3536 3541 } 3537 3542 … … 3540 3545 { 3541 3546 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS)); 3542 return iemRaiseSelectorNotPresentWithErr(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3547 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 3543 3548 } 3544 3549 … … 3549 3554 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3550 3555 { 3551 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCS);3556 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS); 3552 3557 if (rcStrict != VINF_SUCCESS) 3553 3558 return rcStrict; … … 3562 3567 pCtx->cs.u64Base = u64Base; 3563 3568 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; 3564 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), &pCtx->cs));3569 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs)); 3565 3570 } 3566 3571 … … 3602 3607 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp, 3603 3608 cbStackFrame)); 3604 return iemRaiseStackSelectorNotPresentWithErr(p IemCpu, uExt);3609 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt); 3605 3610 } 3606 3611 } … … 3612 3617 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp, 3613 3618 cbStackFrame)); 3614 return iemRaiseStackSelectorNotPresentWithErr(p IemCpu, uExt);3619 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt); 3615 3620 } 3616 3621 } … … 3618 3623 3619 3624 if (fIsNewTSS386) 3620 rcStrict = iemMemStackPushU32(p IemCpu, uErr);3625 rcStrict = iemMemStackPushU32(pVCpu, uErr); 3621 3626 else 3622 rcStrict = iemMemStackPushU16(p IemCpu, uErr);3627 rcStrict = iemMemStackPushU16(pVCpu, uErr); 3623 3628 if (rcStrict != VINF_SUCCESS) 3624 3629 { … … 3635 3640 pCtx->eip, pCtx->cs.u32Limit)); 3636 3641 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */ 3637 return iemRaiseGeneralProtectionFault(p IemCpu, uExt);3642 return iemRaiseGeneralProtectionFault(pVCpu, uExt); 3638 3643 } 3639 3644 … … 3647 3652 * 3648 3653 * @returns VBox strict status code. 3649 * @param p IemCpu The IEM per CPU instance data.3654 * @param pVCpu The cross context virtual CPU structure of the calling thread. 3650 3655 * @param pCtx The CPU context. 
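The CS privilege tests above distinguish conforming from non-conforming code segments. Roughly, with an own constant for the conforming bit:

#include <stdbool.h>
#include <stdint.h>

#define MY_SEL_TYPE_CONF  0x4   /* conforming bit within the code-segment type */

static bool myTaskSwitchCsDplOk(uint8_t u4Type, uint8_t uDpl, uint8_t uRpl)
{
    if (u4Type & MY_SEL_TYPE_CONF)
        return uDpl <= uRpl;   /* conforming: DPL above RPL raises #TS */
    return uDpl == uRpl;       /* non-conforming: DPL must equal RPL */
}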
3651 3656 * @param cbInstr The number of bytes to offset rIP by in the return … … 3657 3662 */ 3658 3663 IEM_STATIC VBOXSTRICTRC 3659 iemRaiseXcptOrIntInProtMode(P IEMCPU pIemCpu,3664 iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu, 3660 3665 PCPUMCTX pCtx, 3661 3666 uint8_t cbInstr, … … 3671 3676 { 3672 3677 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt)); 3673 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3678 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3674 3679 } 3675 3680 X86DESC Idte; 3676 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(p IemCpu, &Idte.u, UINT8_MAX,3681 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX, 3677 3682 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector); 3678 3683 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) … … 3689 3694 { 3690 3695 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); 3691 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3696 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3692 3697 } 3693 3698 bool fTaskGate = false; … … 3711 3716 * esp. call gates. */ 3712 3717 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); 3713 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3718 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3714 3719 } 3715 3720 … … 3738 3743 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) 3739 3744 { 3740 if (p IemCpu->uCpl > Idte.Gate.u2Dpl)3745 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl) 3741 3746 { 3742 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, p IemCpu->uCpl, Idte.Gate.u2Dpl));3743 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3747 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl)); 3748 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3744 3749 } 3745 3750 } … … 3749 3754 { 3750 3755 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector)); 3751 return iemRaiseSelectorNotPresentWithErr(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3756 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3752 3757 } 3753 3758 … … 3767 3772 */ 3768 3773 IEMSELDESC DescTSS; 3769 rcStrict = iemMemFetchSelDescWithErr(p IemCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);3774 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt); 3770 3775 if (rcStrict != VINF_SUCCESS) 3771 3776 { … … 3782 3787 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n", 3783 3788 u8Vector, SelTSS, DescTSS.Legacy.au64)); 3784 return iemRaiseGeneralProtectionFault(p IemCpu, (SelTSS & uSelMask) | uExt);3789 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt); 3785 3790 } 3786 3791 … … 3789 3794 { 3790 3795 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present 
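The #GP error codes built above for bad IDT references follow the standard x86 error-code layout (restated here, not taken from this changeset): bit 0 is EXT, bit 1 flags an IDT reference, and the vector index starts at bit 3. A small helper producing the same values as the X86_TRAP_ERR_IDT expressions in the hunk:

#include <stdint.h>

static uint16_t myIdtErrorCode(uint8_t u8Vector, int fExternalEvent)
{
    return (uint16_t)(((uint16_t)u8Vector << 3) | 0x2 | (fExternalEvent ? 0x1 : 0));
}
/* e.g. myIdtErrorCode(0x0E, 0) == 0x72 for a software-raised vector 14. */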
%#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64)); 3791 return iemRaiseSelectorNotPresentWithErr(p IemCpu, (SelTSS & uSelMask) | uExt);3796 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt); 3792 3797 } 3793 3798 3794 3799 /* Do the actual task switch. */ 3795 return iemTaskSwitch(p IemCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);3800 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS); 3796 3801 } 3797 3802 … … 3801 3806 { 3802 3807 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS)); 3803 return iemRaiseGeneralProtectionFault0(p IemCpu);3808 return iemRaiseGeneralProtectionFault0(pVCpu); 3804 3809 } 3805 3810 3806 3811 /* Fetch the descriptor for the new CS. */ 3807 3812 IEMSELDESC DescCS; 3808 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */3813 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */ 3809 3814 if (rcStrict != VINF_SUCCESS) 3810 3815 { … … 3817 3822 { 3818 3823 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type)); 3819 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);3824 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 3820 3825 } 3821 3826 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)) 3822 3827 { 3823 3828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type)); 3824 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);3829 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 3825 3830 } 3826 3831 … … 3830 3835 * same-privilege stack behavior further down. A testcase would 3831 3836 * be nice. */ 3832 if (DescCS.Legacy.Gen.u2Dpl > p IemCpu->uCpl)3837 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl) 3833 3838 { 3834 3839 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n", 3835 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, p IemCpu->uCpl));3836 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);3840 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 3841 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 3837 3842 } 3838 3843 … … 3841 3846 { 3842 3847 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS)); 3843 return iemRaiseSelectorNotPresentBySelector(p IemCpu, NewCS);3848 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS); 3844 3849 } 3845 3850 … … 3854 3859 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n", 3855 3860 u8Vector, uNewEip, cbLimitCS, NewCS)); 3856 return iemRaiseGeneralProtectionFault(p IemCpu, 0);3861 return iemRaiseGeneralProtectionFault(pVCpu, 0); 3857 3862 } 3858 3863 3859 3864 /* Calc the flag image to push. */ 3860 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);3865 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 3861 3866 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) 3862 3867 fEfl &= ~X86_EFL_RF; 3863 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))3868 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 3864 3869 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. 
*/ 3865 3870 3866 3871 /* From V8086 mode only go to CPL 0. */ 3867 3872 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF 3868 ? p IemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;3873 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl; 3869 3874 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */ 3870 3875 { 3871 3876 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl)); 3872 return iemRaiseGeneralProtectionFault(p IemCpu, 0);3877 return iemRaiseGeneralProtectionFault(pVCpu, 0); 3873 3878 } 3874 3879 … … 3877 3882 * This in turns means validating the new SS and ESP... 3878 3883 */ 3879 if (uNewCpl != p IemCpu->uCpl)3884 if (uNewCpl != pVCpu->iem.s.uCpl) 3880 3885 { 3881 3886 RTSEL NewSS; 3882 3887 uint32_t uNewEsp; 3883 rcStrict = iemRaiseLoadStackFromTss32Or16(p IemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);3888 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp); 3884 3889 if (rcStrict != VINF_SUCCESS) 3885 3890 return rcStrict; 3886 3891 3887 3892 IEMSELDESC DescSS; 3888 rcStrict = iemMiscValidateNewSS(p IemCpu, pCtx, NewSS, uNewCpl, &DescSS);3893 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS); 3889 3894 if (rcStrict != VINF_SUCCESS) 3890 3895 return rcStrict; … … 3903 3908 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n", 3904 3909 u8Vector, NewSS, uNewEsp, cbStackFrame)); 3905 return iemRaiseSelectorBoundsBySelector(p IemCpu, NewSS);3910 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS); 3906 3911 } 3907 3912 } … … 3913 3918 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n", 3914 3919 u8Vector, NewSS, uNewEsp, cbStackFrame)); 3915 return iemRaiseSelectorBoundsBySelector(p IemCpu, NewSS);3920 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS); 3916 3921 } 3917 3922 } … … 3922 3927 3923 3928 /* Set the new CPL so that stack accesses use it. */ 3924 uint8_t const uOldCpl = p IemCpu->uCpl;3925 p IemCpu->uCpl = uNewCpl;3929 uint8_t const uOldCpl = pVCpu->iem.s.uCpl; 3930 pVCpu->iem.s.uCpl = uNewCpl; 3926 3931 3927 3932 /* Create the stack frame. */ 3928 3933 RTPTRUNION uStackFrame; 3929 rcStrict = iemMemMap(p IemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,3934 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, 3930 3935 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... 
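The new CPL computed above decides whether a stack switch is needed: a conforming handler segment keeps the caller's CPL, a non-conforming one runs at the segment's DPL. A one-line sketch of that rule with simplified fields:

#include <stdint.h>

#define MY_SEL_TYPE_CONF  0x4

static uint8_t myHandlerCpl(uint8_t u4CsType, uint8_t uCsDpl, uint8_t uCurCpl)
{
    /* A result different from uCurCpl forces the SS:ESP switch that follows. */
    return (u4CsType & MY_SEL_TYPE_CONF) ? uCurCpl : uCsDpl;
}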
*/ 3931 3936 if (rcStrict != VINF_SUCCESS) … … 3968 3973 } 3969 3974 } 3970 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);3975 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); 3971 3976 if (rcStrict != VINF_SUCCESS) 3972 3977 return rcStrict; … … 3978 3983 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3979 3984 { 3980 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, NewCS);3985 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS); 3981 3986 if (rcStrict != VINF_SUCCESS) 3982 3987 return rcStrict; … … 3986 3991 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3987 3992 { 3988 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, NewSS);3993 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS); 3989 3994 if (rcStrict != VINF_SUCCESS) 3990 3995 return rcStrict; … … 4014 4019 if (fEfl & X86_EFL_VM) 4015 4020 { 4016 iemHlpLoadNullDataSelectorOnV86Xcpt(p IemCpu, &pCtx->gs);4017 iemHlpLoadNullDataSelectorOnV86Xcpt(p IemCpu, &pCtx->fs);4018 iemHlpLoadNullDataSelectorOnV86Xcpt(p IemCpu, &pCtx->es);4019 iemHlpLoadNullDataSelectorOnV86Xcpt(p IemCpu, &pCtx->ds);4021 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs); 4022 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs); 4023 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es); 4024 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds); 4020 4025 } 4021 4026 } … … 4028 4033 RTPTRUNION uStackFrame; 4029 4034 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate; 4030 rcStrict = iemMemStackPushBeginSpecial(p IemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);4035 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp); 4031 4036 if (rcStrict != VINF_SUCCESS) 4032 4037 return rcStrict; … … 4038 4043 *uStackFrame.pu32++ = uErr; 4039 4044 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip; 4040 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | p IemCpu->uCpl;4045 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl; 4041 4046 uStackFrame.pu32[2] = fEfl; 4042 4047 } … … 4046 4051 *uStackFrame.pu16++ = uErr; 4047 4052 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip; 4048 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | p IemCpu->uCpl;4053 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl; 4049 4054 uStackFrame.pu16[2] = fEfl; 4050 4055 } 4051 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */4056 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */ 4052 4057 if (rcStrict != VINF_SUCCESS) 4053 4058 return rcStrict; … … 4056 4061 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 4057 4062 { 4058 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, NewCS);4063 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS); 4059 4064 if (rcStrict != VINF_SUCCESS) 4060 4065 return rcStrict; … … 4078 4083 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */ 4079 4084 fEfl &= ~fEflToClear; 4080 IEMMISC_SET_EFL(p IemCpu, pCtx, fEfl);4085 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 4081 4086 4082 4087 if (fFlags & IEM_XCPT_FLAGS_CR2) … … 4094 4099 * 4095 4100 * @returns VBox strict status code. 4096 * @param p IemCpu The IEM per CPU instance data.4101 * @param pVCpu The cross context virtual CPU structure of the calling thread. 
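For the same-privilege path the frame size computed above is worth spelling out: a 16-bit gate pushes IP, CS and FLAGS (6 bytes, 8 with an error code) and a 32-bit gate pushes the doubleword forms (12 or 16 bytes). The same expression, stand-alone:

#include <stdint.h>

static uint8_t myXcptFrameSize(int fHasErrorCode, int f32BitGate)
{
    return (uint8_t)((fHasErrorCode ? 8 : 6) << (f32BitGate ? 1 : 0));
}
/* myXcptFrameSize(0, 0) == 6, myXcptFrameSize(1, 1) == 16. */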
4097 4102 * @param pCtx The CPU context. 4098 4103 * @param cbInstr The number of bytes to offset rIP by in the return … … 4104 4109 */ 4105 4110 IEM_STATIC VBOXSTRICTRC 4106 iemRaiseXcptOrIntInLongMode(P IEMCPU pIemCpu,4111 iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu, 4107 4112 PCPUMCTX pCtx, 4108 4113 uint8_t cbInstr, … … 4119 4124 { 4120 4125 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt)); 4121 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4126 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 4122 4127 } 4123 4128 X86DESC64 Idte; 4124 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(p IemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);4129 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt); 4125 4130 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 4126 rcStrict = iemMemFetchSysU64(p IemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);4131 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8); 4127 4132 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 4128 4133 return rcStrict; … … 4138 4143 { 4139 4144 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); 4140 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4145 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 4141 4146 } 4142 4147 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM; … … 4151 4156 default: 4152 4157 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type)); 4153 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 4154 4159 } 4155 4160 … … 4157 4162 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) 4158 4163 { 4159 if (p IemCpu->uCpl > Idte.Gate.u2Dpl)4164 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl) 4160 4165 { 4161 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, p IemCpu->uCpl, Idte.Gate.u2Dpl));4162 return iemRaiseGeneralProtectionFault(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4166 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl)); 4167 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 4163 4168 } 4164 4169 } … … 4168 4173 { 4169 4174 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector)); 4170 return iemRaiseSelectorNotPresentWithErr(p IemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4175 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 4171 4176 } 4172 4177 … … 4176 4181 { 4177 4182 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS)); 4178 return iemRaiseGeneralProtectionFault0(p IemCpu);4183 return iemRaiseGeneralProtectionFault0(pVCpu); 4179 4184 } 4180 4185 4181 4186 /* Fetch the descriptor for the new CS. 
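In long mode each IDT gate is 16 bytes, which is why the code above fetches two 64-bit halves at pIdt + offIdt and pIdt + offIdt + 8 and only accepts the 64-bit interrupt and trap gate types. A sketch of the indexing and the type filter, with the standard AMD64 encodings restated as own constants:

#include <stdbool.h>
#include <stdint.h>

static uint32_t myIdtOffset64(uint8_t u8Vector)
{
    return (uint32_t)u8Vector * 16u;   /* 16-byte gates in long mode */
}

static bool myIsLongModeGateType(uint8_t u4Type)
{
    return u4Type == 0xE    /* 64-bit interrupt gate */
        || u4Type == 0xF;   /* 64-bit trap gate */
}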
*/ 4182 4187 IEMSELDESC DescCS; 4183 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, NewCS, X86_XCPT_GP);4188 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); 4184 4189 if (rcStrict != VINF_SUCCESS) 4185 4190 { … … 4192 4197 { 4193 4198 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type)); 4194 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);4199 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 4195 4200 } 4196 4201 if ( !DescCS.Long.Gen.u1Long … … 4200 4205 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n", 4201 4206 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig)); 4202 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);4207 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 4203 4208 } 4204 4209 … … 4209 4214 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched 4210 4215 * when CPU in Ring-0. Result \#GP? */ 4211 if (DescCS.Legacy.Gen.u2Dpl > p IemCpu->uCpl)4216 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl) 4212 4217 { 4213 4218 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n", 4214 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, p IemCpu->uCpl));4215 return iemRaiseGeneralProtectionFault(p IemCpu, NewCS & X86_SEL_MASK_OFF_RPL);4219 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 4220 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL); 4216 4221 } 4217 4222 … … 4221 4226 { 4222 4227 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS)); 4223 return iemRaiseSelectorNotPresentBySelector(p IemCpu, NewCS);4228 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS); 4224 4229 } 4225 4230 … … 4231 4236 { 4232 4237 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip)); 4233 return iemRaiseGeneralProtectionFault0(p IemCpu);4238 return iemRaiseGeneralProtectionFault0(pVCpu); 4234 4239 } 4235 4240 … … 4240 4245 uint64_t uNewRsp; 4241 4246 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF 4242 ? p IemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;4243 if ( uNewCpl != p IemCpu->uCpl4247 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl; 4248 if ( uNewCpl != pVCpu->iem.s.uCpl 4244 4249 || Idte.Gate.u3IST != 0) 4245 4250 { 4246 rcStrict = iemRaiseLoadStackFromTss64(p IemCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);4251 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp); 4247 4252 if (rcStrict != VINF_SUCCESS) 4248 4253 return rcStrict; … … 4255 4260 * Calc the flag image to push. 4256 4261 */ 4257 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);4262 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 4258 4263 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) 4259 4264 fEfl &= ~X86_EFL_RF; 4260 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))4265 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 4261 4266 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */ 4262 4267 … … 4265 4270 */ 4266 4271 /* Set the new CPL so that stack accesses use it. 
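The #GP(0) above for a non-canonical handler RIP uses the usual canonical test. The same rule as a stand-alone helper, equivalent to the IEM_IS_CANONICAL check under the 48-bit virtual address assumption:

#include <stdbool.h>
#include <stdint.h>

static bool myIsCanonical(uint64_t uAddr)
{
    /* Bits 63:48 must be copies of bit 47 on 48-bit implementations. */
    return (uAddr + UINT64_C(0x800000000000)) < UINT64_C(0x1000000000000);
}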
*/ 4267 uint8_t const uOldCpl = p IemCpu->uCpl;4268 p IemCpu->uCpl = uNewCpl;4272 uint8_t const uOldCpl = pVCpu->iem.s.uCpl; 4273 pVCpu->iem.s.uCpl = uNewCpl; 4269 4274 4270 4275 /* Create the stack frame. */ 4271 4276 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR)); 4272 4277 RTPTRUNION uStackFrame; 4273 rcStrict = iemMemMap(p IemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,4278 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, 4274 4279 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */ 4275 4280 if (rcStrict != VINF_SUCCESS) … … 4284 4289 uStackFrame.pu64[3] = pCtx->rsp; 4285 4290 uStackFrame.pu64[4] = pCtx->ss.Sel; 4286 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);4291 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); 4287 4292 if (rcStrict != VINF_SUCCESS) 4288 4293 return rcStrict; … … 4294 4299 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 4295 4300 { 4296 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, NewCS);4301 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS); 4297 4302 if (rcStrict != VINF_SUCCESS) 4298 4303 return rcStrict; … … 4324 4329 4325 4330 fEfl &= ~fEflToClear; 4326 IEMMISC_SET_EFL(p IemCpu, pCtx, fEfl);4331 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 4327 4332 4328 4333 if (fFlags & IEM_XCPT_FLAGS_CR2) … … 4342 4347 * 4343 4348 * @returns VBox strict status code. 4344 * @param p IemCpu The IEM per CPU instance data.4349 * @param pVCpu The cross context virtual CPU structure of the calling thread. 4345 4350 * @param cbInstr The number of bytes to offset rIP by in the return 4346 4351 * address. … … 4351 4356 */ 4352 4357 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) 4353 iemRaiseXcptOrInt(P IEMCPU pIemCpu,4358 iemRaiseXcptOrInt(PVMCPU pVCpu, 4354 4359 uint8_t cbInstr, 4355 4360 uint8_t u8Vector, … … 4358 4363 uint64_t uCr2) 4359 4364 { 4360 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4365 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4361 4366 #ifdef IN_RING0 4362 int rc = HMR0EnsureCompleteBasicContext( IEMCPU_TO_VMCPU(pIemCpu), pCtx);4367 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx); 4363 4368 AssertRCReturn(rc, rc); 4364 4369 #endif … … 4367 4372 * Flush prefetch buffer 4368 4373 */ 4369 p IemCpu->cbOpcode = pIemCpu->offOpcode;4374 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 4370 4375 4371 4376 /* … … 4383 4388 } 4384 4389 #ifdef DBGFTRACE_ENABLED 4385 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",4386 p IemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,4390 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx", 4391 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2, 4387 4392 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp); 4388 4393 #endif … … 4391 4396 * Do recursion accounting. 
4392 4397 */ 4393 uint8_t const uPrevXcpt = p IemCpu->uCurXcpt;4394 uint32_t const fPrevXcpt = p IemCpu->fCurXcpt;4395 if (p IemCpu->cXcptRecursions == 0)4398 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt; 4399 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt; 4400 if (pVCpu->iem.s.cXcptRecursions == 0) 4396 4401 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n", 4397 4402 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2)); … … 4399 4404 { 4400 4405 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n", 4401 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, p IemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));4406 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt)); 4402 4407 4403 4408 /** @todo double and tripple faults. */ 4404 if (p IemCpu->cXcptRecursions >= 3)4409 if (pVCpu->iem.s.cXcptRecursions >= 3) 4405 4410 { 4406 4411 #ifdef DEBUG_bird … … 4416 4421 } */ 4417 4422 } 4418 p IemCpu->cXcptRecursions++;4419 p IemCpu->uCurXcpt = u8Vector;4420 p IemCpu->fCurXcpt = fFlags;4423 pVCpu->iem.s.cXcptRecursions++; 4424 pVCpu->iem.s.uCurXcpt = u8Vector; 4425 pVCpu->iem.s.fCurXcpt = fFlags; 4421 4426 4422 4427 /* … … 4426 4431 if (LogIs3Enabled()) 4427 4432 { 4428 PVM pVM = IEMCPU_TO_VM(pIemCpu); 4429 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 4433 PVM pVM = pVCpu->CTX_SUFF(pVM); 4430 4434 char szRegs[4096]; 4431 4435 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), … … 4469 4473 VBOXSTRICTRC rcStrict; 4470 4474 if (!(pCtx->cr0 & X86_CR0_PE)) 4471 rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);4475 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); 4472 4476 else if (pCtx->msrEFER & MSR_K6_EFER_LMA) 4473 rcStrict = iemRaiseXcptOrIntInLongMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);4477 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); 4474 4478 else 4475 rcStrict = iemRaiseXcptOrIntInProtMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);4479 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2); 4476 4480 4477 4481 /* Flush the prefetch buffer. */ 4478 p IemCpu->cbOpcode = pIemCpu->offOpcode;4482 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 4479 4483 4480 4484 /* 4481 4485 * Unwind. 
4482 4486 */ 4483 p IemCpu->cXcptRecursions--;4484 p IemCpu->uCurXcpt = uPrevXcpt;4485 p IemCpu->fCurXcpt = fPrevXcpt;4487 pVCpu->iem.s.cXcptRecursions--; 4488 pVCpu->iem.s.uCurXcpt = uPrevXcpt; 4489 pVCpu->iem.s.fCurXcpt = fPrevXcpt; 4486 4490 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n", 4487 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, p IemCpu->uCpl));4491 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl)); 4488 4492 return rcStrict; 4489 4493 } … … 4494 4498 */ 4495 4499 IEM_STATIC DECL_NO_RETURN(void) 4496 iemRaiseXcptOrIntJmp(P IEMCPU pIemCpu,4500 iemRaiseXcptOrIntJmp(PVMCPU pVCpu, 4497 4501 uint8_t cbInstr, 4498 4502 uint8_t u8Vector, … … 4501 4505 uint64_t uCr2) 4502 4506 { 4503 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(p IemCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4504 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));4507 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2); 4508 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 4505 4509 } 4506 4510 #endif … … 4508 4512 4509 4513 /** \#DE - 00. */ 4510 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(P IEMCPU pIemCpu)4511 { 4512 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4514 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu) 4515 { 4516 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4513 4517 } 4514 4518 … … 4516 4520 /** \#DB - 01. 4517 4521 * @note This automatically clear DR7.GD. */ 4518 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(P IEMCPU pIemCpu)4522 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu) 4519 4523 { 4520 4524 /** @todo set/clear RF. */ 4521 p IemCpu->CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD;4522 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4525 pVCpu->iem.s.CTX_SUFF(pCtx)->dr[7] &= ~X86_DR7_GD; 4526 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4523 4527 } 4524 4528 4525 4529 4526 4530 /** \#UD - 06. */ 4527 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(P IEMCPU pIemCpu)4528 { 4529 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4531 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu) 4532 { 4533 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4530 4534 } 4531 4535 4532 4536 4533 4537 /** \#NM - 07. */ 4534 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(P IEMCPU pIemCpu)4535 { 4536 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4538 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu) 4539 { 4540 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4537 4541 } 4538 4542 4539 4543 4540 4544 /** \#TS(err) - 0a. 
*/ 4541 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(P IEMCPU pIemCpu, uint16_t uErr)4542 { 4543 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4545 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr) 4546 { 4547 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 4544 4548 } 4545 4549 4546 4550 4547 4551 /** \#TS(tr) - 0a. */ 4548 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(P IEMCPU pIemCpu)4549 { 4550 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4551 p IemCpu->CTX_SUFF(pCtx)->tr.Sel, 0);4552 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu) 4553 { 4554 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4555 pVCpu->iem.s.CTX_SUFF(pCtx)->tr.Sel, 0); 4552 4556 } 4553 4557 4554 4558 4555 4559 /** \#TS(0) - 0a. */ 4556 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(P IEMCPU pIemCpu)4557 { 4558 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4560 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu) 4561 { 4562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4559 4563 0, 0); 4560 4564 } … … 4562 4566 4563 4567 /** \#TS(err) - 0a. */ 4564 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(P IEMCPU pIemCpu, uint16_t uSel)4565 { 4566 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4568 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel) 4569 { 4570 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4567 4571 uSel & X86_SEL_MASK_OFF_RPL, 0); 4568 4572 } … … 4570 4574 4571 4575 /** \#NP(err) - 0b. */ 4572 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(P IEMCPU pIemCpu, uint16_t uErr)4573 { 4574 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4576 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr) 4577 { 4578 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 4575 4579 } 4576 4580 4577 4581 4578 4582 /** \#NP(seg) - 0b. */ 4579 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(P IEMCPU pIemCpu, uint32_t iSegReg)4580 { 4581 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4582 iemSRegFetchU16(p IemCpu, iSegReg) & ~X86_SEL_RPL, 0);4583 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg) 4584 { 4585 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4586 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0); 4583 4587 } 4584 4588 4585 4589 4586 4590 /** \#NP(sel) - 0b. 
*/ 4587 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(P IEMCPU pIemCpu, uint16_t uSel)4588 { 4589 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4591 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel) 4592 { 4593 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4590 4594 uSel & ~X86_SEL_RPL, 0); 4591 4595 } … … 4593 4597 4594 4598 /** \#SS(seg) - 0c. */ 4595 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(P IEMCPU pIemCpu, uint16_t uSel)4596 { 4597 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4599 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel) 4600 { 4601 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4598 4602 uSel & ~X86_SEL_RPL, 0); 4599 4603 } … … 4601 4605 4602 4606 /** \#SS(err) - 0c. */ 4603 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(P IEMCPU pIemCpu, uint16_t uErr)4604 { 4605 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4607 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr) 4608 { 4609 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 4606 4610 } 4607 4611 4608 4612 4609 4613 /** \#GP(n) - 0d. */ 4610 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(P IEMCPU pIemCpu, uint16_t uErr)4611 { 4612 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4614 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr) 4615 { 4616 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0); 4613 4617 } 4614 4618 4615 4619 4616 4620 /** \#GP(0) - 0d. */ 4617 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(P IEMCPU pIemCpu)4618 { 4619 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4621 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu) 4622 { 4623 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4620 4624 } 4621 4625 4622 4626 #ifdef IEM_WITH_SETJMP 4623 4627 /** \#GP(0) - 0d. */ 4624 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(P IEMCPU pIemCpu)4625 { 4626 iemRaiseXcptOrIntJmp(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4628 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu) 4629 { 4630 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4627 4631 } 4628 4632 #endif … … 4630 4634 4631 4635 /** \#GP(sel) - 0d. 
*/ 4632 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(P IEMCPU pIemCpu, RTSEL Sel)4633 { 4634 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4636 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel) 4637 { 4638 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 4635 4639 Sel & ~X86_SEL_RPL, 0); 4636 4640 } … … 4638 4642 4639 4643 /** \#GP(0) - 0d. */ 4640 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(P IEMCPU pIemCpu)4641 { 4642 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4644 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu) 4645 { 4646 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4643 4647 } 4644 4648 4645 4649 4646 4650 /** \#GP(sel) - 0d. */ 4647 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)4651 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess) 4648 4652 { 4649 4653 NOREF(iSegReg); NOREF(fAccess); 4650 return iemRaiseXcptOrInt(p IemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,4654 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP, 4651 4655 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4652 4656 } … … 4654 4658 #ifdef IEM_WITH_SETJMP 4655 4659 /** \#GP(sel) - 0d, longjmp. */ 4656 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)4660 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess) 4657 4661 { 4658 4662 NOREF(iSegReg); NOREF(fAccess); 4659 iemRaiseXcptOrIntJmp(p IemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,4663 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP, 4660 4664 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4661 4665 } … … 4663 4667 4664 4668 /** \#GP(sel) - 0d. */ 4665 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(P IEMCPU pIemCpu, RTSEL Sel)4669 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel) 4666 4670 { 4667 4671 NOREF(Sel); 4668 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4669 4673 } 4670 4674 4671 4675 #ifdef IEM_WITH_SETJMP 4672 4676 /** \#GP(sel) - 0d, longjmp. */ 4673 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(P IEMCPU pIemCpu, RTSEL Sel)4677 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel) 4674 4678 { 4675 4679 NOREF(Sel); 4676 iemRaiseXcptOrIntJmp(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4680 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4677 4681 } 4678 4682 #endif … … 4680 4684 4681 4685 /** \#GP(sel) - 0d. 
*/ 4682 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(P IEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)4686 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess) 4683 4687 { 4684 4688 NOREF(iSegReg); NOREF(fAccess); 4685 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4686 4690 } 4687 4691 4688 4692 #ifdef IEM_WITH_SETJMP 4689 4693 /** \#GP(sel) - 0d, longjmp. */ 4690 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(P IEMCPU pIemCpu, uint32_t iSegReg,4694 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, 4691 4695 uint32_t fAccess) 4692 4696 { 4693 4697 NOREF(iSegReg); NOREF(fAccess); 4694 iemRaiseXcptOrIntJmp(p IemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4698 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4695 4699 } 4696 4700 #endif … … 4698 4702 4699 4703 /** \#PF(n) - 0e. */ 4700 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(P IEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)4704 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) 4701 4705 { 4702 4706 uint16_t uErr; … … 4719 4723 } 4720 4724 4721 if (p IemCpu->uCpl == 3)4725 if (pVCpu->iem.s.uCpl == 3) 4722 4726 uErr |= X86_TRAP_PF_US; 4723 4727 4724 4728 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE 4725 && ( (p IemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE)4726 && (p IemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) )4729 && ( (pVCpu->iem.s.CTX_SUFF(pCtx)->cr4 & X86_CR4_PAE) 4730 && (pVCpu->iem.s.CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) ) 4727 4731 uErr |= X86_TRAP_PF_ID; 4728 4732 … … 4736 4740 if (fAccess & IEM_ACCESS_TYPE_WRITE) 4737 4741 { 4738 if (!IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))4742 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ)) 4739 4743 uErr |= X86_TRAP_PF_RW; 4740 4744 } 4741 4745 #endif 4742 4746 4743 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,4747 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2, 4744 4748 uErr, GCPtrWhere); 4745 4749 } … … 4747 4751 4748 4752 /** \#MF(0) - 10. */ 4749 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(P IEMCPU pIemCpu)4750 { 4751 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4753 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu) 4754 { 4755 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4752 4756 } 4753 4757 4754 4758 4755 4759 /** \#AC(0) - 11. 
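iemRaisePageFault above assembles the #PF error code from the access type, the CPL and the NX configuration. The bit layout is the architectural one, restated here; the rc-to-P/RSVD mapping is elided in the hunk above:

#include <stdint.h>

#define MY_PF_P     0x01   /* protection violation (page was present) */
#define MY_PF_RW    0x02   /* write access */
#define MY_PF_US    0x04   /* user-mode access (CPL == 3) */
#define MY_PF_RSVD  0x08   /* reserved bit set in a paging entry */
#define MY_PF_ID    0x10   /* instruction fetch with PAE + NXE enabled */

static uint16_t myPfErrorCode(int fProtViolation, int fRsvdBits, int fWrite,
                              int fUserCpl3, int fInstrFetchNx)
{
    uint16_t uErr = 0;
    if (fProtViolation) uErr |= MY_PF_P;
    if (fRsvdBits)      uErr |= MY_PF_RSVD | MY_PF_P;
    if (fWrite)         uErr |= MY_PF_RW;
    if (fUserCpl3)      uErr |= MY_PF_US;
    if (fInstrFetchNx)  uErr |= MY_PF_ID;
    return uErr;
}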
*/ 4756 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(P IEMCPU pIemCpu)4757 { 4758 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4760 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu) 4761 { 4762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4759 4763 } 4760 4764 … … 4772 4776 { 4773 4777 NOREF(cbInstr); 4774 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4778 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4775 4779 } 4776 4780 … … 4788 4792 { 4789 4793 NOREF(cbInstr); 4790 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4791 4795 } 4792 4796 … … 4804 4808 { 4805 4809 NOREF(cbInstr); 4806 return iemRaiseXcptOrInt(p IemCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4810 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0); 4807 4811 } 4808 4812 … … 4822 4826 * Recalculates the effective operand size. 4823 4827 * 4824 * @param p IemCpu The IEM state.4825 */ 4826 IEM_STATIC void iemRecalEffOpSize(P IEMCPU pIemCpu)4827 { 4828 switch (p IemCpu->enmCpuMode)4828 * @param pVCpu The cross context virtual CPU structure of the calling thread. 4829 */ 4830 IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu) 4831 { 4832 switch (pVCpu->iem.s.enmCpuMode) 4829 4833 { 4830 4834 case IEMMODE_16BIT: 4831 p IemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;4835 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT; 4832 4836 break; 4833 4837 case IEMMODE_32BIT: 4834 p IemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;4838 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT; 4835 4839 break; 4836 4840 case IEMMODE_64BIT: 4837 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))4841 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) 4838 4842 { 4839 4843 case 0: 4840 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;4844 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize; 4841 4845 break; 4842 4846 case IEM_OP_PRF_SIZE_OP: 4843 p IemCpu->enmEffOpSize = IEMMODE_16BIT;4847 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT; 4844 4848 break; 4845 4849 case IEM_OP_PRF_SIZE_REX_W: 4846 4850 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP: 4847 p IemCpu->enmEffOpSize = IEMMODE_64BIT;4851 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT; 4848 4852 break; 4849 4853 } … … 4859 4863 * operand size. 4860 4864 * 4861 * @param p IemCpu The IEM state.4862 */ 4863 IEM_STATIC void iemRecalEffOpSize64Default(P IEMCPU pIemCpu)4864 { 4865 Assert(p IemCpu->enmCpuMode == IEMMODE_64BIT);4866 p IemCpu->enmDefOpSize = IEMMODE_64BIT;4867 if ((p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)4868 p IemCpu->enmEffOpSize = IEMMODE_64BIT;4865 * @param pVCpu The cross context virtual CPU structure of the calling thread. 
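iemRecalEffOpSize above encodes the operand-size rules: the 0x66 prefix toggles between 16 and 32 bit in legacy modes, while in 64-bit mode REX.W wins, 0x66 alone selects 16-bit, and otherwise the instruction's default size applies. The same decision table as a small helper with an own enum, not IEMMODE:

#include <stdbool.h>

enum MYOPSIZE { MYOP_16, MYOP_32, MYOP_64 };

static enum MYOPSIZE myEffOpSize(enum MYOPSIZE enmCpuMode, enum MYOPSIZE enmDefOpSize,
                                 bool fPrefix66, bool fRexW)
{
    switch (enmCpuMode)
    {
        case MYOP_16: return fPrefix66 ? MYOP_32 : MYOP_16;
        case MYOP_32: return fPrefix66 ? MYOP_16 : MYOP_32;
        default:      /* 64-bit mode */
            if (fRexW)     return MYOP_64;   /* REX.W beats 0x66 */
            if (fPrefix66) return MYOP_16;
            return enmDefOpSize;             /* the instruction's default size */
    }
}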
4866 */ 4867 IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu) 4868 { 4869 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 4870 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 4871 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP) 4872 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT; 4869 4873 else 4870 p IemCpu->enmEffOpSize = IEMMODE_16BIT;4874 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT; 4871 4875 } 4872 4876 … … 4883 4887 /** 4884 4888 * Used to add extra details about a stub case. 4885 * @param p IemCpu The IEM per CPU state.4886 */ 4887 IEM_STATIC void iemOpStubMsg2(P IEMCPU pIemCpu)4889 * @param pVCpu The cross context virtual CPU structure of the calling thread. 4890 */ 4891 IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu) 4888 4892 { 4889 4893 #if defined(LOG_ENABLED) && defined(IN_RING3) 4890 PVM pVM = IEMCPU_TO_VM(pIemCpu); 4891 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 4894 PVM pVM = pVCpu->CTX_SUFF(pVM); 4892 4895 char szRegs[4096]; 4893 4896 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), … … 4925 4928 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr); 4926 4929 #else 4927 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", p IemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip);4930 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->iem.s.CTX_SUFF(pCtx)->cs, pVCpu->iem.s.CTX_SUFF(pCtx)->rip); 4928 4931 #endif 4929 4932 } … … 4939 4942 do { \ 4940 4943 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \ 4941 iemOpStubMsg2(p IemCpu); \4944 iemOpStubMsg2(pVCpu); \ 4942 4945 RTAssertPanic(); \ 4943 4946 } while (0) … … 4994 4997 * 4995 4998 * @returns Hidden register reference. 4996 * @param p IemCpu The per CPU data.4999 * @param pVCpu The cross context virtual CPU structure of the calling thread. 4997 5000 * @param iSegReg The segment register. 4998 5001 */ 4999 IEM_STATIC PCPUMSELREG iemSRegGetHid(P IEMCPU pIemCpu, uint8_t iSegReg)5000 { 5001 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5002 IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg) 5003 { 5004 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5002 5005 5003 5006 Assert(iSegReg < X86_SREG_COUNT); … … 5010 5013 5011 5014 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 5012 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg)))5015 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))) 5013 5016 { /* likely */ } 5014 5017 else 5015 CPUMGuestLazyLoadHiddenSelectorReg( IEMCPU_TO_VMCPU(pIemCpu), pSReg);5018 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); 5016 5019 #else 5017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg));5020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 5018 5021 #endif 5019 5022 return pSReg; … … 5025 5028 * 5026 5029 * @returns Hidden register reference. 5027 * @param p IemCpu The per CPU data.5030 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5028 5031 * @param pSReg The segment register. 
5029 5032 */ 5030 IEM_STATIC PCPUMSELREG iemSRegUpdateHid(P IEMCPU pIemCpu, PCPUMSELREG pSReg)5033 IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg) 5031 5034 { 5032 5035 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 5033 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg))5034 CPUMGuestLazyLoadHiddenSelectorReg( IEMCPU_TO_VMCPU(pIemCpu), pSReg);5036 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 5037 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); 5035 5038 #else 5036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg));5037 NOREF(p IemCpu);5039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 5040 NOREF(pVCpu); 5038 5041 #endif 5039 5042 return pSReg; … … 5046 5049 * 5047 5050 * @returns Pointer to the selector variable. 5048 * @param p IemCpu The per CPU data.5051 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5049 5052 * @param iSegReg The segment register. 5050 5053 */ 5051 IEM_STATIC uint16_t *iemSRegRef(P IEMCPU pIemCpu, uint8_t iSegReg)5052 { 5053 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5054 IEM_STATIC uint16_t *iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg) 5055 { 5056 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5054 5057 switch (iSegReg) 5055 5058 { … … 5069 5072 * 5070 5073 * @returns The selector value. 5071 * @param p IemCpu The per CPU data.5074 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5072 5075 * @param iSegReg The segment register. 5073 5076 */ 5074 IEM_STATIC uint16_t iemSRegFetchU16(P IEMCPU pIemCpu, uint8_t iSegReg)5075 { 5076 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5077 IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg) 5078 { 5079 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5077 5080 switch (iSegReg) 5078 5081 { … … 5092 5095 * 5093 5096 * @returns Register reference. 5094 * @param p IemCpu The per CPU data.5097 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5095 5098 * @param iReg The general register. 5096 5099 */ 5097 IEM_STATIC void *iemGRegRef(P IEMCPU pIemCpu, uint8_t iReg)5098 { 5099 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5100 IEM_STATIC void *iemGRegRef(PVMCPU pVCpu, uint8_t iReg) 5101 { 5102 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5100 5103 switch (iReg) 5101 5104 { … … 5127 5130 * 5128 5131 * @returns Register reference. 5129 * @param p IemCpu The per CPU data.5132 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5130 5133 * @param iReg The register. 5131 5134 */ 5132 IEM_STATIC uint8_t *iemGRegRefU8(P IEMCPU pIemCpu, uint8_t iReg)5133 { 5134 if (p IemCpu->fPrefixes & IEM_OP_PRF_REX)5135 return (uint8_t *)iemGRegRef(p IemCpu, iReg);5136 5137 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(p IemCpu, iReg & 3);5135 IEM_STATIC uint8_t *iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg) 5136 { 5137 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) 5138 return (uint8_t *)iemGRegRef(pVCpu, iReg); 5139 5140 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pVCpu, iReg & 3); 5138 5141 if (iReg >= 4) 5139 5142 pu8Reg++; … … 5146 5149 * 5147 5150 * @returns The register value. 5148 * @param p IemCpu The per CPU data.5151 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5149 5152 * @param iReg The register. 
5150 5153 */ 5151 IEM_STATIC uint8_t iemGRegFetchU8(P IEMCPU pIemCpu, uint8_t iReg)5152 { 5153 uint8_t const *pbSrc = iemGRegRefU8(p IemCpu, iReg);5154 IEM_STATIC uint8_t iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg) 5155 { 5156 uint8_t const *pbSrc = iemGRegRefU8(pVCpu, iReg); 5154 5157 return *pbSrc; 5155 5158 } … … 5160 5163 * 5161 5164 * @returns The register value. 5162 * @param p IemCpu The per CPU data.5165 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5163 5166 * @param iReg The register. 5164 5167 */ 5165 IEM_STATIC uint16_t iemGRegFetchU16(P IEMCPU pIemCpu, uint8_t iReg)5166 { 5167 return *(uint16_t *)iemGRegRef(p IemCpu, iReg);5168 IEM_STATIC uint16_t iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg) 5169 { 5170 return *(uint16_t *)iemGRegRef(pVCpu, iReg); 5168 5171 } 5169 5172 … … 5173 5176 * 5174 5177 * @returns The register value. 5175 * @param p IemCpu The per CPU data.5178 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5176 5179 * @param iReg The register. 5177 5180 */ 5178 IEM_STATIC uint32_t iemGRegFetchU32(P IEMCPU pIemCpu, uint8_t iReg)5179 { 5180 return *(uint32_t *)iemGRegRef(p IemCpu, iReg);5181 IEM_STATIC uint32_t iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg) 5182 { 5183 return *(uint32_t *)iemGRegRef(pVCpu, iReg); 5181 5184 } 5182 5185 … … 5186 5189 * 5187 5190 * @returns The register value. 5188 * @param p IemCpu The per CPU data.5191 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5189 5192 * @param iReg The register. 5190 5193 */ 5191 IEM_STATIC uint64_t iemGRegFetchU64(P IEMCPU pIemCpu, uint8_t iReg)5192 { 5193 return *(uint64_t *)iemGRegRef(p IemCpu, iReg);5194 IEM_STATIC uint64_t iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg) 5195 { 5196 return *(uint64_t *)iemGRegRef(pVCpu, iReg); 5194 5197 } 5195 5198 … … 5201 5204 * segment limit. 5202 5205 * 5203 * @param p IemCpu The per CPU data.5206 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5204 5207 * @param offNextInstr The offset of the next instruction. 5205 5208 */ 5206 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(P IEMCPU pIemCpu, int8_t offNextInstr)5207 { 5208 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5209 switch (p IemCpu->enmEffOpSize)5209 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr) 5210 { 5211 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5212 switch (pVCpu->iem.s.enmEffOpSize) 5210 5213 { 5211 5214 case IEMMODE_16BIT: 5212 5215 { 5213 uint16_t uNewIp = pCtx->ip + offNextInstr + p IemCpu->offOpcode;5216 uint16_t uNewIp = pCtx->ip + offNextInstr + pVCpu->iem.s.offOpcode; 5214 5217 if ( uNewIp > pCtx->cs.u32Limit 5215 && p IemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */5216 return iemRaiseGeneralProtectionFault0(p IemCpu);5218 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. 
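The byte-register helpers just above (iemGRegRefU8 / iemGRegFetchU8) encode the classic 8-bit addressing quirk: without any REX prefix, register encodings 4..7 select AH, CH, DH and BH, i.e. the second byte of RAX..RBX, whereas with a REX prefix they select SPL, BPL, SIL and DIL. A small self-contained sketch of the same mapping on a little-endian toy register file (type and function names are made up):

#include <stdint.h>
#include <stdbool.h>

typedef struct TOYREGS { uint64_t aGRegs[16]; } TOYREGS;  /* little-endian host assumed */

/* Return a pointer to the byte register selected by the 3/4-bit encoding iReg. */
static uint8_t *ByteRegRef(TOYREGS *pRegs, uint8_t iReg, bool fAnyRexPrefix)
{
    if (fAnyRexPrefix)
        return (uint8_t *)&pRegs->aGRegs[iReg];          /* AL..R15B, incl. SPL/BPL/SIL/DIL */

    uint8_t *pb = (uint8_t *)&pRegs->aGRegs[iReg & 3];   /* AL, CL, DL, BL */
    if (iReg >= 4)
        pb++;                                            /* AH, CH, DH, BH = high byte of regs 0..3 */
    return pb;
}

The iReg & 3 plus pb++ trick is the same one the IEM code uses; it only works because the guest register file is stored little-endian.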
*/ 5219 return iemRaiseGeneralProtectionFault0(pVCpu); 5217 5220 pCtx->rip = uNewIp; 5218 5221 break; … … 5222 5225 { 5223 5226 Assert(pCtx->rip <= UINT32_MAX); 5224 Assert(p IemCpu->enmCpuMode != IEMMODE_64BIT);5225 5226 uint32_t uNewEip = pCtx->eip + offNextInstr + p IemCpu->offOpcode;5227 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 5228 5229 uint32_t uNewEip = pCtx->eip + offNextInstr + pVCpu->iem.s.offOpcode; 5227 5230 if (uNewEip > pCtx->cs.u32Limit) 5228 return iemRaiseGeneralProtectionFault0(p IemCpu);5231 return iemRaiseGeneralProtectionFault0(pVCpu); 5229 5232 pCtx->rip = uNewEip; 5230 5233 break; … … 5233 5236 case IEMMODE_64BIT: 5234 5237 { 5235 Assert(p IemCpu->enmCpuMode == IEMMODE_64BIT);5236 5237 uint64_t uNewRip = pCtx->rip + offNextInstr + p IemCpu->offOpcode;5238 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 5239 5240 uint64_t uNewRip = pCtx->rip + offNextInstr + pVCpu->iem.s.offOpcode; 5238 5241 if (!IEM_IS_CANONICAL(uNewRip)) 5239 return iemRaiseGeneralProtectionFault0(p IemCpu);5242 return iemRaiseGeneralProtectionFault0(pVCpu); 5240 5243 pCtx->rip = uNewRip; 5241 5244 break; … … 5248 5251 5249 5252 /* Flush the prefetch buffer. */ 5250 p IemCpu->cbOpcode = pIemCpu->offOpcode;5253 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5251 5254 5252 5255 return VINF_SUCCESS; … … 5261 5264 * 5262 5265 * @returns Strict VBox status code. 5263 * @param p IemCpu The per CPU data.5266 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5264 5267 * @param offNextInstr The offset of the next instruction. 5265 5268 */ 5266 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(P IEMCPU pIemCpu, int16_t offNextInstr)5267 { 5268 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5269 Assert(p IemCpu->enmEffOpSize == IEMMODE_16BIT);5270 5271 uint16_t uNewIp = pCtx->ip + offNextInstr + p IemCpu->offOpcode;5269 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr) 5270 { 5271 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5272 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT); 5273 5274 uint16_t uNewIp = pCtx->ip + offNextInstr + pVCpu->iem.s.offOpcode; 5272 5275 if ( uNewIp > pCtx->cs.u32Limit 5273 && p IemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */5274 return iemRaiseGeneralProtectionFault0(p IemCpu);5276 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ 5277 return iemRaiseGeneralProtectionFault0(pVCpu); 5275 5278 /** @todo Test 16-bit jump in 64-bit mode. possible? */ 5276 5279 pCtx->rip = uNewIp; … … 5278 5281 5279 5282 /* Flush the prefetch buffer. */ 5280 p IemCpu->cbOpcode = pIemCpu->offOpcode;5283 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5281 5284 5282 5285 return VINF_SUCCESS; … … 5291 5294 * 5292 5295 * @returns Strict VBox status code. 5293 * @param p IemCpu The per CPU data.5296 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5294 5297 * @param offNextInstr The offset of the next instruction. 
5295 5298 */ 5296 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(P IEMCPU pIemCpu, int32_t offNextInstr)5297 { 5298 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5299 Assert(p IemCpu->enmEffOpSize != IEMMODE_16BIT);5300 5301 if (p IemCpu->enmEffOpSize == IEMMODE_32BIT)5302 { 5303 Assert(pCtx->rip <= UINT32_MAX); Assert(p IemCpu->enmCpuMode != IEMMODE_64BIT);5304 5305 uint32_t uNewEip = pCtx->eip + offNextInstr + p IemCpu->offOpcode;5299 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr) 5300 { 5301 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5302 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT); 5303 5304 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT) 5305 { 5306 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 5307 5308 uint32_t uNewEip = pCtx->eip + offNextInstr + pVCpu->iem.s.offOpcode; 5306 5309 if (uNewEip > pCtx->cs.u32Limit) 5307 return iemRaiseGeneralProtectionFault0(p IemCpu);5310 return iemRaiseGeneralProtectionFault0(pVCpu); 5308 5311 pCtx->rip = uNewEip; 5309 5312 } 5310 5313 else 5311 5314 { 5312 Assert(p IemCpu->enmCpuMode == IEMMODE_64BIT);5313 5314 uint64_t uNewRip = pCtx->rip + offNextInstr + p IemCpu->offOpcode;5315 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 5316 5317 uint64_t uNewRip = pCtx->rip + offNextInstr + pVCpu->iem.s.offOpcode; 5315 5318 if (!IEM_IS_CANONICAL(uNewRip)) 5316 return iemRaiseGeneralProtectionFault0(p IemCpu);5319 return iemRaiseGeneralProtectionFault0(pVCpu); 5317 5320 pCtx->rip = uNewRip; 5318 5321 } … … 5320 5323 5321 5324 /* Flush the prefetch buffer. */ 5322 p IemCpu->cbOpcode = pIemCpu->offOpcode;5325 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5323 5326 5324 5327 return VINF_SUCCESS; … … 5332 5335 * segment limit. 5333 5336 * 5334 * @param p IemCpu The per CPU data.5337 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5335 5338 * @param uNewRip The new RIP value. 5336 5339 */ 5337 IEM_STATIC VBOXSTRICTRC iemRegRipJump(P IEMCPU pIemCpu, uint64_t uNewRip)5338 { 5339 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5340 switch (p IemCpu->enmEffOpSize)5340 IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip) 5341 { 5342 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5343 switch (pVCpu->iem.s.enmEffOpSize) 5341 5344 { 5342 5345 case IEMMODE_16BIT: … … 5344 5347 Assert(uNewRip <= UINT16_MAX); 5345 5348 if ( uNewRip > pCtx->cs.u32Limit 5346 && p IemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */5347 return iemRaiseGeneralProtectionFault0(p IemCpu);5349 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ 5350 return iemRaiseGeneralProtectionFault0(pVCpu); 5348 5351 /** @todo Test 16-bit jump in 64-bit mode. 
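All three relative-jump helpers (the S8/S16/S32 variants above) follow the same pattern: the target is the current IP plus the displacement plus the instruction length, and it must pass either the CS limit check (16/32-bit code) or the canonical-address check (64-bit code) before RIP is committed and the prefetch buffer is flushed. A condensed sketch of the 32-bit-displacement case, with a simplified context structure and a boolean result standing in for the #GP(0) path:

#include <stdint.h>
#include <stdbool.h>

typedef struct TOYCTX { uint64_t rip; uint32_t csLimit; bool f64BitMode; } TOYCTX;

static bool IsCanonical(uint64_t uAddr)
{   /* Bits 63:47 must be a sign extension of bit 47 (48-bit implementations). */
    return ((int64_t)(uAddr << 16) >> 16) == (int64_t)uAddr;
}

/* Returns true on success, false where the real code raises #GP(0). */
static bool RelJumpS32(TOYCTX *pCtx, int32_t offNextInstr, uint8_t cbInstr)
{
    if (!pCtx->f64BitMode)
    {
        uint32_t uNewEip = (uint32_t)pCtx->rip + offNextInstr + cbInstr;
        if (uNewEip > pCtx->csLimit)
            return false;
        pCtx->rip = uNewEip;
    }
    else
    {
        uint64_t uNewRip = pCtx->rip + (int64_t)offNextInstr + cbInstr;
        if (!IsCanonical(uNewRip))
            return false;
        pCtx->rip = uNewRip;
    }
    return true;
}

The 16-bit variant additionally wraps the result to 16 bits before the limit check, which is why it computes the new IP in a uint16_t.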
*/ 5349 5352 pCtx->rip = uNewRip; … … 5355 5358 Assert(uNewRip <= UINT32_MAX); 5356 5359 Assert(pCtx->rip <= UINT32_MAX); 5357 Assert(p IemCpu->enmCpuMode != IEMMODE_64BIT);5360 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 5358 5361 5359 5362 if (uNewRip > pCtx->cs.u32Limit) 5360 return iemRaiseGeneralProtectionFault0(p IemCpu);5363 return iemRaiseGeneralProtectionFault0(pVCpu); 5361 5364 pCtx->rip = uNewRip; 5362 5365 break; … … 5365 5368 case IEMMODE_64BIT: 5366 5369 { 5367 Assert(p IemCpu->enmCpuMode == IEMMODE_64BIT);5370 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 5368 5371 5369 5372 if (!IEM_IS_CANONICAL(uNewRip)) 5370 return iemRaiseGeneralProtectionFault0(p IemCpu);5373 return iemRaiseGeneralProtectionFault0(pVCpu); 5371 5374 pCtx->rip = uNewRip; 5372 5375 break; … … 5379 5382 5380 5383 /* Flush the prefetch buffer. */ 5381 p IemCpu->cbOpcode = pIemCpu->offOpcode;5384 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 5382 5385 5383 5386 return VINF_SUCCESS; … … 5388 5391 * Get the address of the top of the stack. 5389 5392 * 5390 * @param p IemCpu The per CPU data.5393 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5391 5394 * @param pCtx The CPU context which SP/ESP/RSP should be 5392 5395 * read. 5393 5396 */ 5394 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PC IEMCPU pIemCpu, PCCPUMCTX pCtx)5395 { 5396 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5397 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx) 5398 { 5399 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5397 5400 return pCtx->rsp; 5398 5401 if (pCtx->ss.Attr.n.u1DefBig) … … 5407 5410 * This function leaves the EFLAGS.RF flag alone. 5408 5411 * 5409 * @param p IemCpu The per CPU data.5412 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5410 5413 * @param cbInstr The number of bytes to add. 5411 5414 */ 5412 IEM_STATIC void iemRegAddToRipKeepRF(P IEMCPU pIemCpu, uint8_t cbInstr)5413 { 5414 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5415 switch (p IemCpu->enmCpuMode)5415 IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr) 5416 { 5417 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5418 switch (pVCpu->iem.s.enmCpuMode) 5416 5419 { 5417 5420 case IEMMODE_16BIT: … … 5438 5441 * Updates the RIP/EIP/IP to point to the next instruction. 5439 5442 * 5440 * @param p IemCpu The per CPU data.5441 */ 5442 IEM_STATIC void iemRegUpdateRipKeepRF(P IEMCPU pIemCpu)5443 { 5444 return iemRegAddToRipKeepRF(p IemCpu, pIemCpu->offOpcode);5443 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5444 */ 5445 IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu) 5446 { 5447 return iemRegAddToRipKeepRF(pVCpu, pVCpu->iem.s.offOpcode); 5445 5448 } 5446 5449 #endif … … 5451 5454 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF. 5452 5455 * 5453 * @param p IemCpu The per CPU data.5456 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5454 5457 * @param cbInstr The number of bytes to add. 
5455 5458 */ 5456 IEM_STATIC void iemRegAddToRipAndClearRF(P IEMCPU pIemCpu, uint8_t cbInstr)5457 { 5458 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5459 IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr) 5460 { 5461 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5459 5462 5460 5463 pCtx->eflags.Bits.u1RF = 0; … … 5463 5466 #if ARCH_BITS >= 64 5464 5467 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX }; 5465 Assert(pCtx->rip <= s_aRipMasks[(unsigned)p IemCpu->enmCpuMode]);5466 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)p IemCpu->enmCpuMode];5468 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]); 5469 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]; 5467 5470 #else 5468 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5471 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5469 5472 pCtx->rip += cbInstr; 5470 5473 else 5471 5474 { 5472 5475 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX }; 5473 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)p IemCpu->enmCpuMode];5476 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]; 5474 5477 } 5475 5478 #endif … … 5480 5483 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF. 5481 5484 * 5482 * @param p IemCpu The per CPU data.5483 */ 5484 IEM_STATIC void iemRegUpdateRipAndClearRF(P IEMCPU pIemCpu)5485 { 5486 return iemRegAddToRipAndClearRF(p IemCpu, pIemCpu->offOpcode);5485 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5486 */ 5487 IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu) 5488 { 5489 return iemRegAddToRipAndClearRF(pVCpu, pVCpu->iem.s.offOpcode); 5487 5490 } 5488 5491 … … 5491 5494 * Adds to the stack pointer. 5492 5495 * 5493 * @param p IemCpu The per CPU data.5496 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5494 5497 * @param pCtx The CPU context which SP/ESP/RSP should be 5495 5498 * updated. 5496 5499 * @param cbToAdd The number of bytes to add (8-bit!). 5497 5500 */ 5498 DECLINLINE(void) iemRegAddToRsp(PC IEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToAdd)5499 { 5500 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5501 DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd) 5502 { 5503 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5501 5504 pCtx->rsp += cbToAdd; 5502 5505 else if (pCtx->ss.Attr.n.u1DefBig) … … 5510 5513 * Subtracts from the stack pointer. 5511 5514 * 5512 * @param p IemCpu The per CPU data.5515 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5513 5516 * @param pCtx The CPU context which SP/ESP/RSP should be 5514 5517 * updated. 5515 5518 * @param cbToSub The number of bytes to subtract (8-bit!). 5516 5519 */ 5517 DECLINLINE(void) iemRegSubFromRsp(PC IEMCPU pIemCpu, PCPUMCTX pCtx, uint8_t cbToSub)5518 { 5519 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5520 DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub) 5521 { 5522 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5520 5523 pCtx->rsp -= cbToSub; 5521 5524 else if (pCtx->ss.Attr.n.u1DefBig) … … 5529 5532 * Adds to the temporary stack pointer. 5530 5533 * 5531 * @param p IemCpu The per CPU data.5534 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5532 5535 * @param pTmpRsp The temporary SP/ESP/RSP to update. 5533 5536 * @param cbToAdd The number of bytes to add (16-bit). 
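The stack-pointer helpers just above (iemRegAddToRsp / iemRegSubFromRsp and the Ex variants that follow) all branch the same way: in 64-bit mode the full RSP is adjusted, otherwise SS.D decides whether 32-bit ESP or only 16-bit SP arithmetic is used. A compact sketch of that selection on a register union (little-endian layout assumed, names made up):

#include <stdint.h>
#include <stdbool.h>

typedef union TOYRSP { uint64_t u64; uint32_t u32; uint16_t u16; } TOYRSP;

static void StackAdd(TOYRSP *pRsp, uint8_t cbToAdd, bool f64BitMode, bool fSsDefBig)
{
    if (f64BitMode)
        pRsp->u64 += cbToAdd;    /* full 64-bit RSP */
    else if (fSsDefBig)
        pRsp->u32 += cbToAdd;    /* 32-bit ESP (SS.D = 1) */
    else
        pRsp->u16 += cbToAdd;    /* 16-bit SP  (SS.D = 0), upper bits untouched */
}

Subtraction is symmetrical; the point of the union is that the narrower operations wrap within their own width without disturbing the rest of the register.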
5534 5537 * @param pCtx Where to get the current stack mode. 5535 5538 */ 5536 DECLINLINE(void) iemRegAddToRspEx(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)5537 { 5538 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5539 DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd) 5540 { 5541 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5539 5542 pTmpRsp->u += cbToAdd; 5540 5543 else if (pCtx->ss.Attr.n.u1DefBig) … … 5548 5551 * Subtracts from the temporary stack pointer. 5549 5552 * 5550 * @param p IemCpu The per CPU data.5553 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5551 5554 * @param pTmpRsp The temporary SP/ESP/RSP to update. 5552 5555 * @param cbToSub The number of bytes to subtract. … … 5555 5558 * expecting that. 5556 5559 */ 5557 DECLINLINE(void) iemRegSubFromRspEx(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)5558 { 5559 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5560 DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub) 5561 { 5562 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5560 5563 pTmpRsp->u -= cbToSub; 5561 5564 else if (pCtx->ss.Attr.n.u1DefBig) … … 5571 5574 * 5572 5575 * @returns Effective stack addressf for the push. 5573 * @param p IemCpu The IEM per CPU data.5576 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5574 5577 * @param pCtx Where to get the current stack mode. 5575 5578 * @param cbItem The size of the stack item to pop. 5576 5579 * @param puNewRsp Where to return the new RSP value. 5577 5580 */ 5578 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)5581 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) 5579 5582 { 5580 5583 RTUINT64U uTmpRsp; … … 5582 5585 uTmpRsp.u = pCtx->rsp; 5583 5586 5584 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5587 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5585 5588 GCPtrTop = uTmpRsp.u -= cbItem; 5586 5589 else if (pCtx->ss.Attr.n.u1DefBig) … … 5598 5601 * 5599 5602 * @returns Current stack pointer. 5600 * @param p IemCpu The per CPU data.5603 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5601 5604 * @param pCtx Where to get the current stack mode. 5602 5605 * @param cbItem The size of the stack item to pop. 5603 5606 * @param puNewRsp Where to return the new RSP value. 5604 5607 */ 5605 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)5608 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp) 5606 5609 { 5607 5610 RTUINT64U uTmpRsp; … … 5609 5612 uTmpRsp.u = pCtx->rsp; 5610 5613 5611 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5614 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5612 5615 { 5613 5616 GCPtrTop = uTmpRsp.u; … … 5634 5637 * 5635 5638 * @returns Effective stack addressf for the push. 5636 * @param p IemCpu The per CPU data.5639 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5637 5640 * @param pCtx Where to get the current stack mode. 5638 5641 * @param pTmpRsp The temporary stack pointer. This is updated. 5639 5642 * @param cbItem The size of the stack item to pop. 
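iemRegGetRspForPush and iemRegGetRspForPop further down deliberately return the prospective new RSP through puNewRsp instead of committing it, so that a faulting stack access leaves RSP unchanged. A reduced sketch of the push side, folding the three stack widths into one hypothetical helper:

#include <stdint.h>
#include <stdbool.h>

/* Returns the address to store the pushed item at; *puNewRsp is committed by the
   caller only after the memory write has succeeded. */
static uint64_t RspForPush(uint64_t uOldRsp, uint8_t cbItem,
                           bool f64BitMode, bool fSsDefBig, uint64_t *puNewRsp)
{
    if (f64BitMode)
    {
        uint64_t GCPtrTop = uOldRsp - cbItem;                 /* 64-bit wrap */
        *puNewRsp = GCPtrTop;
        return GCPtrTop;
    }
    if (fSsDefBig)
    {
        uint32_t GCPtrTop = (uint32_t)uOldRsp - cbItem;       /* 32-bit ESP wrap */
        *puNewRsp = (uOldRsp & ~(uint64_t)UINT32_MAX) | GCPtrTop;
        return GCPtrTop;
    }
    uint16_t uNewSp = (uint16_t)((uint16_t)uOldRsp - cbItem); /* 16-bit SP wrap */
    *puNewRsp = (uOldRsp & ~(uint64_t)UINT16_MAX) | uNewSp;
    return uNewSp;
}

The pop side is the mirror image: the returned address is the current top of stack and *puNewRsp is the incremented value.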
5640 5643 */ 5641 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)5644 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem) 5642 5645 { 5643 5646 RTGCPTR GCPtrTop; 5644 5647 5645 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5648 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5646 5649 GCPtrTop = pTmpRsp->u -= cbItem; 5647 5650 else if (pCtx->ss.Attr.n.u1DefBig) … … 5658 5661 * 5659 5662 * @returns Current stack pointer. 5660 * @param p IemCpu The per CPU data.5663 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5661 5664 * @param pCtx Where to get the current stack mode. 5662 5665 * @param pTmpRsp The temporary stack pointer. This is updated. 5663 5666 * @param cbItem The size of the stack item to pop. 5664 5667 */ 5665 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PC IEMCPU pIemCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)5668 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem) 5666 5669 { 5667 5670 RTGCPTR GCPtrTop; 5668 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5669 5672 { 5670 5673 GCPtrTop = pTmpRsp->u; … … 5698 5701 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5699 5702 * 5700 * @param p IemCpu The IEM per CPU data.5701 */ 5702 DECLINLINE(void) iemFpuPrepareUsage(P IEMCPU pIemCpu)5703 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5704 */ 5705 DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu) 5703 5706 { 5704 5707 #ifdef IN_RING3 5705 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);5708 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); 5706 5709 #else 5707 CPUMRZFpuStatePrepareHostCpuForUse( IEMCPU_TO_VMCPU(pIemCpu));5710 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu); 5708 5711 #endif 5709 5712 } … … 5715 5718 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5716 5719 * 5717 * @param p IemCpu The IEM per CPU data.5718 */ 5719 DECLINLINE(void) iemFpuPrepareUsageSse(P IEMCPU pIemCpu)5720 { 5721 iemFpuPrepareUsage(p IemCpu);5720 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5721 */ 5722 DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu) 5723 { 5724 iemFpuPrepareUsage(pVCpu); 5722 5725 } 5723 5726 … … 5728 5731 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5729 5732 * 5730 * @param p IemCpu The IEM per CPU data.5731 */ 5732 DECLINLINE(void) iemFpuActualizeStateForRead(P IEMCPU pIemCpu)5733 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5734 */ 5735 DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu) 5733 5736 { 5734 5737 #ifdef IN_RING3 5735 NOREF(p IemCpu);5738 NOREF(pVCpu); 5736 5739 #else 5737 CPUMRZFpuStateActualizeForRead( IEMCPU_TO_VMCPU(pIemCpu));5740 CPUMRZFpuStateActualizeForRead(pVCpu); 5738 5741 #endif 5739 5742 } … … 5745 5748 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5746 5749 * 5747 * @param p IemCpu The IEM per CPU data.5748 */ 5749 DECLINLINE(void) iemFpuActualizeStateForChange(P IEMCPU pIemCpu)5750 * @param pVCpu The cross context virtual CPU structure of the calling thread. 
5751 */ 5752 DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu) 5750 5753 { 5751 5754 #ifdef IN_RING3 5752 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);5755 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); 5753 5756 #else 5754 CPUMRZFpuStateActualizeForChange( IEMCPU_TO_VMCPU(pIemCpu));5757 CPUMRZFpuStateActualizeForChange(pVCpu); 5755 5758 #endif 5756 5759 } … … 5762 5765 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5763 5766 * 5764 * @param p IemCpu The IEM per CPU data.5765 */ 5766 DECLINLINE(void) iemFpuActualizeSseStateForRead(P IEMCPU pIemCpu)5767 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5768 */ 5769 DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu) 5767 5770 { 5768 5771 #if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM) 5769 NOREF(p IemCpu);5772 NOREF(pVCpu); 5770 5773 #else 5771 CPUMRZFpuStateActualizeSseForRead( IEMCPU_TO_VMCPU(pIemCpu));5774 CPUMRZFpuStateActualizeSseForRead(pVCpu); 5772 5775 #endif 5773 5776 } … … 5779 5782 * This is necessary in ring-0 and raw-mode context (nop in ring-3). 5780 5783 * 5781 * @param p IemCpu The IEM per CPU data.5782 */ 5783 DECLINLINE(void) iemFpuActualizeSseStateForChange(P IEMCPU pIemCpu)5784 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5785 */ 5786 DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu) 5784 5787 { 5785 5788 #if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM) 5786 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);5789 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); 5787 5790 #else 5788 CPUMRZFpuStateActualizeForChange( IEMCPU_TO_VMCPU(pIemCpu));5791 CPUMRZFpuStateActualizeForChange(pVCpu); 5789 5792 #endif 5790 5793 } … … 5807 5810 * Updates the FOP, FPU.CS and FPUIP registers. 5808 5811 * 5809 * @param p IemCpu The IEM per CPU data.5812 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5810 5813 * @param pCtx The CPU context. 5811 5814 * @param pFpuCtx The FPU context. 5812 5815 */ 5813 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(P IEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)5814 { 5815 pFpuCtx->FOP = p IemCpu->abOpcode[pIemCpu->offFpuOpcode]5816 | ((uint16_t)(p IemCpu->abOpcode[pIemCpu->offFpuOpcode - 1] & 0x7) << 8);5816 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx) 5817 { 5818 pFpuCtx->FOP = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offFpuOpcode] 5819 | ((uint16_t)(pVCpu->iem.s.abOpcode[pVCpu->iem.s.offFpuOpcode - 1] & 0x7) << 8); 5817 5820 /** @todo x87.CS and FPUIP needs to be kept seperately. */ 5818 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))5821 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 5819 5822 { 5820 5823 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled … … 5834 5837 * Updates the x87.DS and FPUDP registers. 5835 5838 * 5836 * @param p IemCpu The IEM per CPU data.5839 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5837 5840 * @param pCtx The CPU context. 5838 5841 * @param pFpuCtx The FPU context. … … 5840 5843 * @param GCPtrEff The effective address relative to @a iEffSeg. 
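iemFpuUpdateOpcodeAndIpWorker below builds the 11-bit x87 opcode (FOP) out of the last two instruction bytes: the low three bits of the 0xD8..0xDF escape byte become FOP[10:8] and the following ModR/M byte becomes FOP[7:0]. A tiny sketch of that composition (helper name made up):

#include <stdint.h>

/* Compose the 11-bit FOP value from the escape byte and the ModR/M byte. */
static uint16_t BuildFop(uint8_t bEscape, uint8_t bModRm)
{
    return (uint16_t)(bModRm | ((bEscape & 0x7) << 8));
}

/* Example: FADD ST(0),ST(1) encodes as D8 C1, giving FOP = 0x0C1. */

The FPUIP/FPUCS and FPUDP/FPUDS updates next to it differ between real/V86 mode and protected mode, which is what the surrounding IEM_IS_REAL_OR_V86_MODE branches deal with.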
5841 5844 */ 5842 DECLINLINE(void) iemFpuUpdateDP(P IEMCPU pIemCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)5845 DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff) 5843 5846 { 5844 5847 RTSEL sel; … … 5856 5859 } 5857 5860 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */ 5858 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))5861 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 5859 5862 { 5860 5863 pFpuCtx->DS = 0; … … 5915 5918 * exception prevents it. 5916 5919 * 5917 * @param p IemCpu The IEM per CPU data.5920 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5918 5921 * @param pResult The FPU operation result to push. 5919 5922 * @param pFpuCtx The FPU context. 5920 5923 */ 5921 IEM_STATIC void iemFpuMaybePushResult(P IEMCPU pIemCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)5924 IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) 5922 5925 { 5923 5926 /* Update FSW and bail if there are pending exceptions afterwards. */ … … 6023 6026 * Pushes a FPU result onto the FPU stack if no pending exception prevents it. 6024 6027 * 6025 * @param p IemCpu The IEM per CPU data.6028 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6026 6029 * @param pResult The FPU operation result to push. 6027 6030 */ 6028 IEM_STATIC void iemFpuPushResult(P IEMCPU pIemCpu, PIEMFPURESULT pResult)6029 { 6030 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6031 IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult) 6032 { 6033 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6031 6034 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6032 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6033 iemFpuMaybePushResult(p IemCpu, pResult, pFpuCtx);6035 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6036 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx); 6034 6037 } 6035 6038 … … 6039 6042 * and sets FPUDP and FPUDS. 6040 6043 * 6041 * @param p IemCpu The IEM per CPU data.6044 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6042 6045 * @param pResult The FPU operation result to push. 6043 6046 * @param iEffSeg The effective segment register. 6044 6047 * @param GCPtrEff The effective address relative to @a iEffSeg. 6045 6048 */ 6046 IEM_STATIC void iemFpuPushResultWithMemOp(P IEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)6047 { 6048 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6049 IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6050 { 6051 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6049 6052 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6050 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6051 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6052 iemFpuMaybePushResult(p IemCpu, pResult, pFpuCtx);6053 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6054 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6055 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx); 6053 6056 } 6054 6057 … … 6058 6061 * unless a pending exception prevents it. 6059 6062 * 6060 * @param p IemCpu The IEM per CPU data.6063 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6061 6064 * @param pResult The FPU operation result to store and push. 
6062 6065 */ 6063 IEM_STATIC void iemFpuPushResultTwo(P IEMCPU pIemCpu, PIEMFPURESULTTWO pResult)6064 { 6065 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6066 IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult) 6067 { 6068 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6066 6069 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6067 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6070 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6068 6071 6069 6072 /* Update FSW and bail if there are pending exceptions afterwards. */ … … 6112 6115 * FOP. 6113 6116 * 6114 * @param p IemCpu The IEM per CPU data.6117 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6115 6118 * @param pResult The result to store. 6116 6119 * @param iStReg Which FPU register to store it in. 6117 6120 */ 6118 IEM_STATIC void iemFpuStoreResult(P IEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)6119 { 6120 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6121 IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) 6122 { 6123 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6121 6124 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6122 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6125 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6123 6126 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 6124 6127 } … … 6129 6132 * FOP, and then pops the stack. 6130 6133 * 6131 * @param p IemCpu The IEM per CPU data.6134 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6132 6135 * @param pResult The result to store. 6133 6136 * @param iStReg Which FPU register to store it in. 6134 6137 */ 6135 IEM_STATIC void iemFpuStoreResultThenPop(P IEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg)6136 { 6137 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6138 IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) 6139 { 6140 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6138 6141 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6139 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6142 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6140 6143 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 6141 6144 iemFpuMaybePopOne(pFpuCtx); … … 6147 6150 * FPUDP, and FPUDS. 6148 6151 * 6149 * @param p IemCpu The IEM per CPU data.6152 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6150 6153 * @param pResult The result to store. 6151 6154 * @param iStReg Which FPU register to store it in. … … 6153 6156 * @param GCPtrEff The effective memory operand offset. 6154 6157 */ 6155 IEM_STATIC void iemFpuStoreResultWithMemOp(P IEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg,6158 IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, 6156 6159 uint8_t iEffSeg, RTGCPTR GCPtrEff) 6157 6160 { 6158 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6161 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6159 6162 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6160 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6161 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6163 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6164 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6162 6165 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 6163 6166 } … … 6168 6171 * FPUDP, and FPUDS, and then pops the stack. 
6169 6172 * 6170 * @param p IemCpu The IEM per CPU data.6173 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6171 6174 * @param pResult The result to store. 6172 6175 * @param iStReg Which FPU register to store it in. … … 6174 6177 * @param GCPtrEff The effective memory operand offset. 6175 6178 */ 6176 IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(P IEMCPU pIemCpu, PIEMFPURESULT pResult,6179 IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, 6177 6180 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6178 6181 { 6179 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6182 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6180 6183 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6181 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6182 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6184 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6185 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6183 6186 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 6184 6187 iemFpuMaybePopOne(pFpuCtx); … … 6189 6192 * Updates the FOP, FPUIP, and FPUCS. For FNOP. 6190 6193 * 6191 * @param p IemCpu The IEM per CPU data.6192 */ 6193 IEM_STATIC void iemFpuUpdateOpcodeAndIp(P IEMCPU pIemCpu)6194 { 6195 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6194 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6195 */ 6196 IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu) 6197 { 6198 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6196 6199 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6197 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6200 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6198 6201 } 6199 6202 … … 6202 6205 * Marks the specified stack register as free (for FFREE). 6203 6206 * 6204 * @param p IemCpu The IEM per CPU data.6207 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6205 6208 * @param iStReg The register to free. 6206 6209 */ 6207 IEM_STATIC void iemFpuStackFree(P IEMCPU pIemCpu, uint8_t iStReg)6210 IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg) 6208 6211 { 6209 6212 Assert(iStReg < 8); 6210 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6213 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6211 6214 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK; 6212 6215 pFpuCtx->FTW &= ~RT_BIT(iReg); … … 6217 6220 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it. 6218 6221 * 6219 * @param p IemCpu The IEM per CPU data.6220 */ 6221 IEM_STATIC void iemFpuStackIncTop(P IEMCPU pIemCpu)6222 { 6223 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6222 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6223 */ 6224 IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu) 6225 { 6226 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6224 6227 uint16_t uFsw = pFpuCtx->FSW; 6225 6228 uint16_t uTop = uFsw & X86_FSW_TOP_MASK; … … 6234 6237 * Decrements FSW.TOP, i.e. push an item off the stack without storing anything. 6235 6238 * 6236 * @param p IemCpu The IEM per CPU data.6237 */ 6238 IEM_STATIC void iemFpuStackDecTop(P IEMCPU pIemCpu)6239 { 6240 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6239 * @param pVCpu The cross context virtual CPU structure of the calling thread. 
6240 */ 6241 IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu) 6242 { 6243 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6241 6244 uint16_t uFsw = pFpuCtx->FSW; 6242 6245 uint16_t uTop = uFsw & X86_FSW_TOP_MASK; … … 6251 6254 * Updates the FSW, FOP, FPUIP, and FPUCS. 6252 6255 * 6253 * @param p IemCpu The IEM per CPU data.6256 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6254 6257 * @param u16FSW The FSW from the current instruction. 6255 6258 */ 6256 IEM_STATIC void iemFpuUpdateFSW(P IEMCPU pIemCpu, uint16_t u16FSW)6257 { 6258 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6259 IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW) 6260 { 6261 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6259 6262 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6260 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6263 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6261 6264 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 6262 6265 } … … 6266 6269 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack. 6267 6270 * 6268 * @param p IemCpu The IEM per CPU data.6271 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6269 6272 * @param u16FSW The FSW from the current instruction. 6270 6273 */ 6271 IEM_STATIC void iemFpuUpdateFSWThenPop(P IEMCPU pIemCpu, uint16_t u16FSW)6272 { 6273 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6274 IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW) 6275 { 6276 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6274 6277 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6275 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6278 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6276 6279 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 6277 6280 iemFpuMaybePopOne(pFpuCtx); … … 6282 6285 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. 6283 6286 * 6284 * @param p IemCpu The IEM per CPU data.6287 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6285 6288 * @param u16FSW The FSW from the current instruction. 6286 6289 * @param iEffSeg The effective memory operand selector register. 6287 6290 * @param GCPtrEff The effective memory operand offset. 6288 6291 */ 6289 IEM_STATIC void iemFpuUpdateFSWWithMemOp(P IEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)6290 { 6291 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6292 IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6293 { 6294 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6292 6295 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6293 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6294 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6296 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6295 6298 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 6296 6299 } … … 6300 6303 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice. 6301 6304 * 6302 * @param p IemCpu The IEM per CPU data.6305 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6303 6306 * @param u16FSW The FSW from the current instruction. 
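iemFpuStackIncTop and iemFpuStackDecTop above merely rotate the three-bit TOP field inside FSW, leaving the tag word alone, which is exactly how FINCSTP/FDECSTP behave. A self-contained sketch of the same bit manipulation (TOP lives in FSW bits 13:11):

#include <stdint.h>

#define FSW_TOP_SHIFT 11
#define FSW_TOP_MASK  (UINT16_C(7) << FSW_TOP_SHIFT)

static uint16_t FswIncTop(uint16_t uFsw)   /* FINCSTP: TOP = (TOP + 1) & 7 */
{
    uint16_t uTop = (uint16_t)((uFsw + (1u << FSW_TOP_SHIFT)) & FSW_TOP_MASK);
    return (uint16_t)((uFsw & ~FSW_TOP_MASK) | uTop);
}

static uint16_t FswDecTop(uint16_t uFsw)   /* FDECSTP: TOP = (TOP - 1) & 7 */
{
    uint16_t uTop = (uint16_t)((uFsw - (1u << FSW_TOP_SHIFT)) & FSW_TOP_MASK);
    return (uint16_t)((uFsw & ~FSW_TOP_MASK) | uTop);
}

iemFpuStackFree (FFREE) instead clears the FTW bit of the physical register (TOP + iStReg) & 7, as shown a little earlier in the hunk.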
6304 6307 */ 6305 IEM_STATIC void iemFpuUpdateFSWThenPopPop(P IEMCPU pIemCpu, uint16_t u16FSW)6306 { 6307 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6308 IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW) 6309 { 6310 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6308 6311 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6309 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6312 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6310 6313 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 6311 6314 iemFpuMaybePopOne(pFpuCtx); … … 6317 6320 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack. 6318 6321 * 6319 * @param p IemCpu The IEM per CPU data.6322 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6320 6323 * @param u16FSW The FSW from the current instruction. 6321 6324 * @param iEffSeg The effective memory operand selector register. 6322 6325 * @param GCPtrEff The effective memory operand offset. 6323 6326 */ 6324 IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(P IEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)6325 { 6326 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6327 IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6328 { 6329 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6327 6330 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6328 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6329 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6331 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6332 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6330 6333 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 6331 6334 iemFpuMaybePopOne(pFpuCtx); … … 6336 6339 * Worker routine for raising an FPU stack underflow exception. 6337 6340 * 6338 * @param p IemCpu The IEM per CPU data.6341 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6339 6342 * @param pFpuCtx The FPU context. 6340 6343 * @param iStReg The stack register being accessed. 6341 6344 */ 6342 IEM_STATIC void iemFpuStackUnderflowOnly(P IEMCPU pIemCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)6345 IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg) 6343 6346 { 6344 6347 Assert(iStReg < 8 || iStReg == UINT8_MAX); … … 6366 6369 * Raises a FPU stack underflow exception. 6367 6370 * 6368 * @param p IemCpu The IEM per CPU data.6371 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6369 6372 * @param iStReg The destination register that should be loaded 6370 6373 * with QNaN if \#IS is not masked. Specify 6371 6374 * UINT8_MAX if none (like for fcom). 
6372 6375 */ 6373 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(P IEMCPU pIemCpu, uint8_t iStReg)6374 { 6375 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6376 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg) 6377 { 6378 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6376 6379 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6377 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6378 iemFpuStackUnderflowOnly(p IemCpu, pFpuCtx, iStReg);6380 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6381 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg); 6379 6382 } 6380 6383 6381 6384 6382 6385 DECL_NO_INLINE(IEM_STATIC, void) 6383 iemFpuStackUnderflowWithMemOp(P IEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)6384 { 6385 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6386 iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6387 { 6388 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6386 6389 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6387 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6388 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6389 iemFpuStackUnderflowOnly(p IemCpu, pFpuCtx, iStReg);6390 } 6391 6392 6393 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(P IEMCPU pIemCpu, uint8_t iStReg)6394 { 6395 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6390 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6392 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg); 6393 } 6394 6395 6396 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg) 6397 { 6398 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6396 6399 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6397 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6398 iemFpuStackUnderflowOnly(p IemCpu, pFpuCtx, iStReg);6400 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6401 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg); 6399 6402 iemFpuMaybePopOne(pFpuCtx); 6400 6403 } … … 6402 6405 6403 6406 DECL_NO_INLINE(IEM_STATIC, void) 6404 iemFpuStackUnderflowWithMemOpThenPop(P IEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)6405 { 6406 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6407 iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6408 { 6409 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6407 6410 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6408 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6409 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6410 iemFpuStackUnderflowOnly(p IemCpu, pFpuCtx, iStReg);6411 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6412 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6413 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg); 6411 6414 iemFpuMaybePopOne(pFpuCtx); 6412 6415 } 6413 6416 6414 6417 6415 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(P IEMCPU pIemCpu)6416 { 6417 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6418 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu) 6419 { 6420 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6418 6421 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6419 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6420 iemFpuStackUnderflowOnly(p IemCpu, pFpuCtx, UINT8_MAX);6422 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6423 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX); 6421 6424 
iemFpuMaybePopOne(pFpuCtx); 6422 6425 iemFpuMaybePopOne(pFpuCtx); … … 6425 6428 6426 6429 DECL_NO_INLINE(IEM_STATIC, void) 6427 iemFpuStackPushUnderflow(P IEMCPU pIemCpu)6428 { 6429 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6430 iemFpuStackPushUnderflow(PVMCPU pVCpu) 6431 { 6432 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6430 6433 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6431 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6434 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6432 6435 6433 6436 if (pFpuCtx->FCW & X86_FCW_IM) … … 6452 6455 6453 6456 DECL_NO_INLINE(IEM_STATIC, void) 6454 iemFpuStackPushUnderflowTwo(P IEMCPU pIemCpu)6455 { 6456 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6457 iemFpuStackPushUnderflowTwo(PVMCPU pVCpu) 6458 { 6459 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6457 6460 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6458 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6461 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6459 6462 6460 6463 if (pFpuCtx->FCW & X86_FCW_IM) … … 6509 6512 * Raises a FPU stack overflow exception on a push. 6510 6513 * 6511 * @param p IemCpu The IEM per CPU data.6512 */ 6513 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(P IEMCPU pIemCpu)6514 { 6515 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6514 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6515 */ 6516 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu) 6517 { 6518 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6516 6519 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6517 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6520 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6518 6521 iemFpuStackPushOverflowOnly(pFpuCtx); 6519 6522 } … … 6523 6526 * Raises a FPU stack overflow exception on a push with a memory operand. 6524 6527 * 6525 * @param p IemCpu The IEM per CPU data.6528 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6526 6529 * @param iEffSeg The effective memory operand selector register. 6527 6530 * @param GCPtrEff The effective memory operand offset. 
6528 6531 */ 6529 6532 DECL_NO_INLINE(IEM_STATIC, void) 6530 iemFpuStackPushOverflowWithMemOp(P IEMCPU pIemCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)6531 { 6532 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6533 iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) 6534 { 6535 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6533 6536 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6534 iemFpuUpdateDP(p IemCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);6535 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6537 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 6538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6536 6539 iemFpuStackPushOverflowOnly(pFpuCtx); 6537 6540 } 6538 6541 6539 6542 6540 IEM_STATIC int iemFpuStRegNotEmpty(P IEMCPU pIemCpu, uint8_t iStReg)6541 { 6542 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6543 IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg) 6544 { 6545 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6543 6546 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK; 6544 6547 if (pFpuCtx->FTW & RT_BIT(iReg)) … … 6548 6551 6549 6552 6550 IEM_STATIC int iemFpuStRegNotEmptyRef(P IEMCPU pIemCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)6551 { 6552 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6553 IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) 6554 { 6555 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6553 6556 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK; 6554 6557 if (pFpuCtx->FTW & RT_BIT(iReg)) … … 6561 6564 6562 6565 6563 IEM_STATIC int iemFpu2StRegsNotEmptyRef(P IEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,6566 IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, 6564 6567 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) 6565 6568 { 6566 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6569 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6567 6570 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW); 6568 6571 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK; … … 6578 6581 6579 6582 6580 IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(P IEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)6581 { 6582 PX86FXSTATE pFpuCtx = &p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87;6583 IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) 6584 { 6585 PX86FXSTATE pFpuCtx = &pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87; 6583 6586 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW); 6584 6587 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK; … … 6682 6685 * Updates the IEMCPU::cbWritten counter if applicable. 6683 6686 * 6684 * @param p IemCpu The IEM per CPU data.6687 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6685 6688 * @param fAccess The access being accounted for. 6686 6689 * @param cbMem The access size. 
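iemFpuStRegNotEmpty and its Ref siblings above show how a relative ST(i) operand is resolved: the physical register index is (TOP + i) & 7, and in the abridged FXSAVE-style tag word a set bit means the register holds a value. A small sketch of that test (names made up):

#include <stdint.h>
#include <stdbool.h>

#define FSW_TOP_SHIFT 11

/* True if ST(iStReg) is in use, given FSW and the abridged (one bit per register) tag word. */
static bool StRegNotEmpty(uint16_t uFsw, uint8_t bFtw, uint8_t iStReg)
{
    uint8_t iTop = (uint8_t)((uFsw >> FSW_TOP_SHIFT) & 7);  /* physical index of ST(0) */
    uint8_t iReg = (uint8_t)((iTop + iStReg) & 7);          /* physical index of ST(i) */
    return (bFtw & (1u << iReg)) != 0;                      /* 1 = valid, 0 = empty */
}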
6687 6690 */ 6688 DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(P IEMCPU pIemCpu, uint32_t fAccess, size_t cbMem)6691 DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem) 6689 6692 { 6690 6693 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE) 6691 6694 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) ) 6692 p IemCpu->cbWritten += (uint32_t)cbMem;6695 pVCpu->iem.s.cbWritten += (uint32_t)cbMem; 6693 6696 } 6694 6697 … … 6700 6703 * @returns VBox strict status code. 6701 6704 * 6702 * @param p IemCpu The IEM per CPU data.6705 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6703 6706 * @param pHid Pointer to the hidden register. 6704 6707 * @param iSegReg The register number. … … 6708 6711 */ 6709 6712 IEM_STATIC VBOXSTRICTRC 6710 iemMemSegCheckWriteAccessEx(P IEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)6711 { 6712 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)6713 iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) 6714 { 6715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6713 6716 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base; 6714 6717 else 6715 6718 { 6716 6719 if (!pHid->Attr.n.u1Present) 6717 return iemRaiseSelectorNotPresentBySegReg(p IemCpu, iSegReg);6720 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg); 6718 6721 6719 6722 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE) 6720 6723 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) ) 6721 && p IemCpu->enmCpuMode != IEMMODE_64BIT )6722 return iemRaiseSelectorInvalidAccess(p IemCpu, iSegReg, IEM_ACCESS_DATA_W);6724 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT ) 6725 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W); 6723 6726 *pu64BaseAddr = pHid->u64Base; 6724 6727 } … … 6733 6736 * @returns VBox strict status code. 6734 6737 * 6735 * @param p IemCpu The IEM per CPU data.6738 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6736 6739 * @param pHid Pointer to the hidden register. 6737 6740 * @param iSegReg The register number. … … 6741 6744 */ 6742 6745 IEM_STATIC VBOXSTRICTRC 6743 iemMemSegCheckReadAccessEx(P IEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)6744 { 6745 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)6746 iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) 6747 { 6748 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6746 6749 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base; 6747 6750 else 6748 6751 { 6749 6752 if (!pHid->Attr.n.u1Present) 6750 return iemRaiseSelectorNotPresentBySegReg(p IemCpu, iSegReg);6753 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg); 6751 6754 6752 6755 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE) 6753 return iemRaiseSelectorInvalidAccess(p IemCpu, iSegReg, IEM_ACCESS_DATA_R);6756 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R); 6754 6757 *pu64BaseAddr = pHid->u64Base; 6755 6758 } … … 6765 6768 * @returns VBox strict status code. 6766 6769 * 6767 * @param p IemCpu The IEM per CPU data.6770 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6768 6771 * @param fAccess The kind of access which is being performed. 
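iemMemSegCheckWriteAccessEx and iemMemSegCheckReadAccessEx above reduce to a few descriptor-attribute tests: in 64-bit mode only FS and GS contribute a non-zero base and no attribute checks apply, otherwise the segment must be present and must permit the requested access before its base address is handed back. A condensed sketch of the read-side check with made-up types and a tri-state result instead of the real exception raisers:

#include <stdint.h>
#include <stdbool.h>

typedef struct TOYSEG { bool fPresent; bool fCode; bool fReadable; uint64_t u64Base; } TOYSEG;
typedef enum { SEG_OK, SEG_NOT_PRESENT, SEG_INVALID_ACCESS } SEGCHECKRC;

static SEGCHECKRC SegCheckRead(const TOYSEG *pSeg, bool f64BitMode, bool fIsFsOrGs,
                               uint64_t *pu64Base)
{
    if (f64BitMode)
    {   /* Long mode is flat; only FS and GS still apply a base. */
        *pu64Base = fIsFsOrGs ? pSeg->u64Base : 0;
        return SEG_OK;
    }
    if (!pSeg->fPresent)
        return SEG_NOT_PRESENT;         /* segment-not-present style fault */
    if (pSeg->fCode && !pSeg->fReadable)
        return SEG_INVALID_ACCESS;      /* execute-only code segment */
    *pu64Base = pSeg->u64Base;
    return SEG_OK;
}

The write-side variant additionally rejects code segments and data segments that lack the write bit.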
6769 6772 * @param iSegReg The index of the segment register to apply. … … 6775 6778 */ 6776 6779 IEM_STATIC VBOXSTRICTRC 6777 iemMemApplySegment(P IEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)6780 iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) 6778 6781 { 6779 6782 if (iSegReg == UINT8_MAX) 6780 6783 return VINF_SUCCESS; 6781 6784 6782 PCPUMSELREGHID pSel = iemSRegGetHid(p IemCpu, iSegReg);6783 switch (p IemCpu->enmCpuMode)6785 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 6786 switch (pVCpu->iem.s.enmCpuMode) 6784 6787 { 6785 6788 case IEMMODE_16BIT: … … 6797 6800 if ( (fAccess & IEM_ACCESS_TYPE_WRITE) 6798 6801 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) ) 6799 return iemRaiseSelectorInvalidAccess(p IemCpu, iSegReg, fAccess);6800 6801 if (!IEM_IS_REAL_OR_V86_MODE(p IemCpu))6802 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess); 6803 6804 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6802 6805 { 6803 6806 /** @todo CPL check. */ … … 6811 6814 if ( GCPtrFirst32 > pSel->u32Limit 6812 6815 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */ 6813 return iemRaiseSelectorBounds(p IemCpu, iSegReg, fAccess);6816 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess); 6814 6817 } 6815 6818 else … … 6820 6823 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1) 6821 6824 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))) 6822 return iemRaiseSelectorBounds(p IemCpu, iSegReg, fAccess);6825 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess); 6823 6826 } 6824 6827 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base; … … 6834 6837 || ( (fAccess & IEM_ACCESS_TYPE_READ) 6835 6838 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) ) 6836 && !IEM_IS_REAL_OR_V86_MODE(p IemCpu) )6837 return iemRaiseSelectorInvalidAccess(p IemCpu, iSegReg, fAccess);6839 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) ) 6840 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess); 6838 6841 6839 6842 if ( GCPtrFirst32 > pSel->u32Limit 6840 6843 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */ 6841 return iemRaiseSelectorBounds(p IemCpu, iSegReg, fAccess);6842 6843 if (!IEM_IS_REAL_OR_V86_MODE(p IemCpu))6844 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess); 6845 6846 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6844 6847 { 6845 6848 /** @todo CPL check. */ … … 6850 6853 } 6851 6854 else 6852 return iemRaiseGeneralProtectionFault0(p IemCpu);6855 return iemRaiseGeneralProtectionFault0(pVCpu); 6853 6856 return VINF_SUCCESS; 6854 6857 } … … 6863 6866 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1))) 6864 6867 return VINF_SUCCESS; 6865 return iemRaiseGeneralProtectionFault0(p IemCpu);6868 return iemRaiseGeneralProtectionFault0(pVCpu); 6866 6869 } 6867 6870 … … 6876 6879 * can access the page as specified. 6877 6880 * 6878 * @param p IemCpu The IEM per CPU data.6881 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6879 6882 * @param GCPtrMem The virtual address. 6880 6883 * @param fAccess The intended access. … … 6882 6885 */ 6883 6886 IEM_STATIC VBOXSTRICTRC 6884 iemMemPageTranslateAndCheckAccess(P IEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)6887 iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) 6885 6888 { 6886 6889 /** @todo Need a different PGM interface here. 
We're currently using … … 6888 6891 RTGCPHYS GCPhys; 6889 6892 uint64_t fFlags; 6890 int rc = PGMGstGetPage( IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);6893 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys); 6891 6894 if (RT_FAILURE(rc)) 6892 6895 { … … 6894 6897 /** @todo Reserved bits in page tables. Requires new PGM interface. */ 6895 6898 *pGCPhysMem = NIL_RTGCPHYS; 6896 return iemRaisePageFault(p IemCpu, GCPtrMem, fAccess, rc);6899 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc); 6897 6900 } 6898 6901 … … 6904 6907 if ( (fAccess & IEM_ACCESS_TYPE_WRITE) 6905 6908 && !(fFlags & X86_PTE_RW) 6906 && ( p IemCpu->uCpl != 06907 || (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))6909 && ( pVCpu->iem.s.uCpl != 0 6910 || (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_WP))) 6908 6911 { 6909 6912 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem)); 6910 6913 *pGCPhysMem = NIL_RTGCPHYS; 6911 return iemRaisePageFault(p IemCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);6914 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED); 6912 6915 } 6913 6916 6914 6917 /* Kernel memory accessed by userland? */ 6915 6918 if ( !(fFlags & X86_PTE_US) 6916 && p IemCpu->uCpl == 36919 && pVCpu->iem.s.uCpl == 3 6917 6920 && !(fAccess & IEM_ACCESS_WHAT_SYS)) 6918 6921 { 6919 6922 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem)); 6920 6923 *pGCPhysMem = NIL_RTGCPHYS; 6921 return iemRaisePageFault(p IemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);6924 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED); 6922 6925 } 6923 6926 … … 6925 6928 if ( (fAccess & IEM_ACCESS_TYPE_EXEC) 6926 6929 && (fFlags & X86_PTE_PAE_NX) 6927 && (p IemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )6930 && (pVCpu->iem.s.CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) ) 6928 6931 { 6929 6932 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem)); 6930 6933 *pGCPhysMem = NIL_RTGCPHYS; 6931 return iemRaisePageFault(p IemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),6934 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE), 6932 6935 VERR_ACCESS_DENIED); 6933 6936 } … … 6942 6945 if ((fFlags & fAccessedDirty) != fAccessedDirty) 6943 6946 { 6944 int rc2 = PGMGstModifyPage( IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);6947 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty); 6945 6948 AssertRC(rc2); 6946 6949 } … … 6957 6960 * 6958 6961 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr). 6959 * @param p IemCpu The IEM per CPU data.6962 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6960 6963 * @param GCPhysMem The physical address. 6961 6964 * @param fAccess The intended access. … … 6963 6966 * @param pLock The PGM lock. 6964 6967 */ 6965 IEM_STATIC int iemMemPageMap(P IEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)6968 IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock) 6966 6969 { 6967 6970 #ifdef IEM_VERIFICATION_MODE_FULL 6968 6971 /* Force the alternative path so we can ignore writes. 
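Condensed, the page-level access checks above amount to three predicates: writes need a writable PTE unless ring-0 runs with CR0.WP clear, ring-3 may not touch supervisor pages except for system accesses, and instruction fetches are refused from no-execute pages once EFER.NXE is enabled. A standalone restatement with plain flag bits (the bit positions below are placeholders for the sketch; the real NX bit lives in bit 63 of a PAE/long-mode PTE):

#include <stdbool.h>
#include <stdint.h>

#define PTE_RW  (1u << 1)   /* writable        (X86_PTE_RW)     */
#define PTE_US  (1u << 2)   /* user accessible (X86_PTE_US)     */
#define PTE_NX  (1u << 3)   /* no-execute; placeholder position */

/* Write permitted?  Ring-0 may write through a read-only mapping as long as
   CR0.WP is clear; everybody else needs the RW bit. */
static bool pageWriteOk(uint32_t fPte, unsigned uCpl, bool fCr0Wp)
{
    return (fPte & PTE_RW) || (uCpl == 0 && !fCr0Wp);
}

/* User-mode (CPL 3) access to a supervisor page is refused unless it is a
   system access such as a descriptor table read. */
static bool pageUserOk(uint32_t fPte, unsigned uCpl, bool fSysAccess)
{
    return (fPte & PTE_US) || uCpl != 3 || fSysAccess;
}

/* Instruction fetch from a no-execute page is refused once EFER.NXE is set. */
static bool pageExecOk(uint32_t fPte, bool fEferNxe)
{
    return !(fPte & PTE_NX) || !fEferNxe;
}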
*/ 6969 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !p IemCpu->fNoRem)6970 { 6971 if (IEM_FULL_VERIFICATION_ENABLED(p IemCpu))6972 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem) 6973 { 6974 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 6972 6975 { 6973 int rc2 = PGMPhysIemQueryAccess( IEMCPU_TO_VM(pIemCpu), GCPhysMem,6974 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), p IemCpu->fBypassHandlers);6976 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem, 6977 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers); 6975 6978 if (RT_FAILURE(rc2)) 6976 p IemCpu->fProblematicMemory = true;6979 pVCpu->iem.s.fProblematicMemory = true; 6977 6980 } 6978 6981 return VERR_PGM_PHYS_TLB_CATCH_ALL; … … 6991 6994 * living in PGM, but with publicly accessible inlined access methods 6992 6995 * could perhaps be an even better solution. */ 6993 int rc = PGMPhysIemGCPhys2Ptr( IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu),6996 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu, 6994 6997 GCPhysMem, 6995 6998 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), 6996 p IemCpu->fBypassHandlers,6999 pVCpu->iem.s.fBypassHandlers, 6997 7000 ppvMem, 6998 7001 pLock); … … 7001 7004 7002 7005 #ifdef IEM_VERIFICATION_MODE_FULL 7003 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(p IemCpu))7004 p IemCpu->fProblematicMemory = true;7006 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 7007 pVCpu->iem.s.fProblematicMemory = true; 7005 7008 #endif 7006 7009 return rc; … … 7011 7014 * Unmap a page previously mapped by iemMemPageMap. 7012 7015 * 7013 * @param p IemCpu The IEM per CPU data.7016 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7014 7017 * @param GCPhysMem The physical address. 7015 7018 * @param fAccess The intended access. … … 7017 7020 * @param pLock The PGM lock. 7018 7021 */ 7019 DECLINLINE(void) iemMemPageUnmap(P IEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)7020 { 7021 NOREF(p IemCpu);7022 DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock) 7023 { 7024 NOREF(pVCpu); 7022 7025 NOREF(GCPhysMem); 7023 7026 NOREF(fAccess); 7024 7027 NOREF(pvMem); 7025 PGMPhysReleasePageMappingLock( IEMCPU_TO_VM(pIemCpu), pLock);7028 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock); 7026 7029 } 7027 7030 … … 7031 7034 * 7032 7035 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative). 7033 * @param p IemCpu The IEM per CPU data.7036 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7034 7037 * @param pvMem The memory address. 7035 7038 * @param fAccess The access to. 
7036 7039 */ 7037 DECLINLINE(int) iemMapLookup(P IEMCPU pIemCpu, void *pvMem, uint32_t fAccess)7038 { 7039 Assert(p IemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));7040 DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess) 7041 { 7042 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)); 7040 7043 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK; 7041 if ( p IemCpu->aMemMappings[0].pv == pvMem7042 && (p IemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)7044 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem 7045 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess) 7043 7046 return 0; 7044 if ( p IemCpu->aMemMappings[1].pv == pvMem7045 && (p IemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)7047 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem 7048 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess) 7046 7049 return 1; 7047 if ( p IemCpu->aMemMappings[2].pv == pvMem7048 && (p IemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)7050 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem 7051 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess) 7049 7052 return 2; 7050 7053 return VERR_NOT_FOUND; … … 7056 7059 * 7057 7060 * @returns Memory mapping index, 1024 on failure. 7058 * @param p IemCpu The IEM per CPU data.7059 */ 7060 IEM_STATIC unsigned iemMemMapFindFree(P IEMCPU pIemCpu)7061 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7062 */ 7063 IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu) 7061 7064 { 7062 7065 /* 7063 7066 * The easy case. 7064 7067 */ 7065 if (p IemCpu->cActiveMappings == 0)7066 { 7067 p IemCpu->iNextMapping = 1;7068 if (pVCpu->iem.s.cActiveMappings == 0) 7069 { 7070 pVCpu->iem.s.iNextMapping = 1; 7068 7071 return 0; 7069 7072 } 7070 7073 7071 7074 /* There should be enough mappings for all instructions. */ 7072 AssertReturn(p IemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);7073 7074 for (unsigned i = 0; i < RT_ELEMENTS(p IemCpu->aMemMappings); i++)7075 if (p IemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)7075 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024); 7076 7077 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++) 7078 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID) 7076 7079 return i; 7077 7080 … … 7084 7087 * 7085 7088 * @returns Strict VBox status code. 7086 * @param p IemCpu The IEM per CPU data.7089 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7087 7090 * @param iMemMap The index of the buffer to commit. 7088 7091 * @param fPostponeFail Whether we can postpone writer failures to ring-3. 7089 7092 * Always false in ring-3, obviously. 
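iemMapLookup above is a plain linear scan over the (at most three) mapping slots, keyed on the pointer that was handed out and the what/type bits of the access; the same idea with generic, hypothetical names:

#include <stddef.h>
#include <stdint.h>

#define MY_ACCESS_KEY_MASK  UINT32_C(0x0000ffff)   /* stand-in for the WHAT|TYPE bits */

typedef struct MYMAPSLOT
{
    void    *pv;        /* pointer returned by the map call               */
    uint32_t fAccess;   /* access flags; a sentinel when the slot is free */
} MYMAPSLOT;

/* Returns the slot index that produced pvMem for this kind of access, or -1. */
static int myMapLookup(const MYMAPSLOT *paSlots, size_t cSlots, void *pvMem, uint32_t fAccess)
{
    fAccess &= MY_ACCESS_KEY_MASK;
    for (size_t i = 0; i < cSlots; i++)
        if (   paSlots[i].pv == pvMem
            && (paSlots[i].fAccess & MY_ACCESS_KEY_MASK) == fAccess)
            return (int)i;
    return -1;
}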
7090 7093 */ 7091 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(P IEMCPU pIemCpu, unsigned iMemMap, bool fPostponeFail)7092 { 7093 Assert(p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);7094 Assert(p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);7094 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail) 7095 { 7096 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED); 7097 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE); 7095 7098 #ifdef IN_RING3 7096 7099 Assert(!fPostponeFail); … … 7101 7104 */ 7102 7105 #ifndef IEM_VERIFICATION_MODE_MINIMAL 7103 PVM pVM = IEMCPU_TO_VM(pIemCpu);7104 if ( !p IemCpu->aMemBbMappings[iMemMap].fUnassigned7105 && !IEM_VERIFICATION_ENABLED(p IemCpu))7106 { 7107 uint16_t const cbFirst = p IemCpu->aMemBbMappings[iMemMap].cbFirst;7108 uint16_t const cbSecond = p IemCpu->aMemBbMappings[iMemMap].cbSecond;7109 uint8_t const *pbBuf = &p IemCpu->aBounceBuffers[iMemMap].ab[0];7110 if (!p IemCpu->fBypassHandlers)7106 PVM pVM = pVCpu->CTX_SUFF(pVM); 7107 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned 7108 && !IEM_VERIFICATION_ENABLED(pVCpu)) 7109 { 7110 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst; 7111 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond; 7112 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]; 7113 if (!pVCpu->iem.s.fBypassHandlers) 7111 7114 { 7112 7115 /* … … 7115 7118 */ 7116 7119 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, 7117 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst,7120 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, 7118 7121 pbBuf, 7119 7122 cbFirst, … … 7124 7127 { 7125 7128 rcStrict = PGMPhysWrite(pVM, 7126 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond,7129 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, 7127 7130 pbBuf + cbFirst, 7128 7131 cbSecond, … … 7133 7136 { 7134 7137 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n", 7135 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7136 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));7137 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7138 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7139 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) )); 7140 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7138 7141 } 7139 7142 # ifndef IN_RING3 … … 7141 7144 { 7142 7145 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n", 7143 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7144 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));7145 p IemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;7146 VMCPU_FF_SET( IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);7147 return iemSetPassUpStatus(p IemCpu, rcStrict);7146 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7147 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) )); 7148 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND; 7149 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM); 7150 return iemSetPassUpStatus(pVCpu, rcStrict); 7148 7151 } 7149 7152 # endif … … 7151 7154 { 7152 7155 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n", 7153 p 
IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7154 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));7156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7157 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) )); 7155 7158 return rcStrict; 7156 7159 } … … 7162 7165 { 7163 7166 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n", 7164 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));7165 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7167 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) )); 7168 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7166 7169 } 7167 7170 else 7168 7171 { 7169 7172 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM, 7170 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond,7173 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, 7171 7174 pbBuf + cbFirst, 7172 7175 cbSecond, … … 7175 7178 { 7176 7179 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n", 7177 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),7178 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));7179 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7180 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict), 7181 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond)); 7182 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7180 7183 } 7181 7184 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2)) 7182 7185 { 7183 7186 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n", 7184 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),7185 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));7187 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict), 7188 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) )); 7186 7189 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2); 7187 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7190 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7188 7191 } 7189 7192 # ifndef IN_RING3 … … 7191 7194 { 7192 7195 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n", 7193 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7194 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));7195 p IemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;7196 VMCPU_FF_SET( IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);7197 return iemSetPassUpStatus(p IemCpu, rcStrict);7196 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7197 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) )); 7198 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND; 7199 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM); 7200 return iemSetPassUpStatus(pVCpu, rcStrict); 7198 7201 } 7199 7202 # endif … … 7201 7204 { 7202 7205 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n", 7203 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),7204 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));7206 
pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict), 7207 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) )); 7205 7208 return rcStrict2; 7206 7209 } … … 7211 7214 { 7212 7215 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n", 7213 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7214 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));7216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) )); 7215 7218 if (!cbSecond) 7216 p IemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;7219 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST; 7217 7220 else 7218 p IemCpu->aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;7219 VMCPU_FF_SET( IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);7220 return iemSetPassUpStatus(p IemCpu, rcStrict);7221 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND; 7222 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM); 7223 return iemSetPassUpStatus(pVCpu, rcStrict); 7221 7224 } 7222 7225 # endif … … 7224 7227 { 7225 7228 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n", 7226 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),7227 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));7229 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict), 7230 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond)); 7228 7231 return rcStrict; 7229 7232 } … … 7234 7237 * No access handlers, much simpler. 7235 7238 */ 7236 int rc = PGMPhysSimpleWriteGCPhys(pVM, p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);7239 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst); 7237 7240 if (RT_SUCCESS(rc)) 7238 7241 { 7239 7242 if (cbSecond) 7240 7243 { 7241 rc = PGMPhysSimpleWriteGCPhys(pVM, p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);7244 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond); 7242 7245 if (RT_SUCCESS(rc)) 7243 7246 { /* likely */ } … … 7245 7248 { 7246 7249 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n", 7247 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,7248 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));7250 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 7251 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc)); 7249 7252 return rc; 7250 7253 } … … 7254 7257 { 7255 7258 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n", 7256 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,7257 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));7259 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc, 7260 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond)); 7258 7261 return rc; 7259 7262 } … … 7266 7269 * Record the write(s). 
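Stripped of the strict-status bookkeeping and the ring-0/raw-mode postponement logic, the commit above is two physical writes out of one linear bounce buffer: cbFirst bytes to the first guest-physical range and, when present, cbSecond bytes to the second. A sketch under that simplification (hypothetical writer callback standing in for the PGM write routines):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t GCPHYS;
typedef int FNPHYSWRITE(GCPHYS GCPhys, const void *pv, size_t cb);  /* 0 on success */

/* Flush a bounce buffer that covers up to two discontiguous physical ranges. */
static int commitBounceBuffer(FNPHYSWRITE *pfnWrite, const uint8_t *pbBuf,
                              GCPHYS GCPhysFirst, size_t cbFirst,
                              GCPHYS GCPhysSecond, size_t cbSecond)
{
    int rc = pfnWrite(GCPhysFirst, pbBuf, cbFirst);
    if (rc == 0 && cbSecond != 0)
        rc = pfnWrite(GCPhysSecond, pbBuf + cbFirst, cbSecond);
    return rc;
}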
7267 7270 */ 7268 if (!p IemCpu->fNoRem)7269 { 7270 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(p IemCpu);7271 if (!pVCpu->iem.s.fNoRem) 7272 { 7273 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 7271 7274 if (pEvtRec) 7272 7275 { 7273 7276 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 7274 pEvtRec->u.RamWrite.GCPhys = p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst;7275 pEvtRec->u.RamWrite.cb = p IemCpu->aMemBbMappings[iMemMap].cbFirst;7276 memcpy(pEvtRec->u.RamWrite.ab, &p IemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);7277 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(p IemCpu->aBounceBuffers[0].ab));7278 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;7279 *p IemCpu->ppIemEvtRecNext = pEvtRec;7277 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst; 7278 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst; 7279 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst); 7280 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab)); 7281 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 7282 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 7280 7283 } 7281 if (p IemCpu->aMemBbMappings[iMemMap].cbSecond)7284 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond) 7282 7285 { 7283 pEvtRec = iemVerifyAllocRecord(p IemCpu);7286 pEvtRec = iemVerifyAllocRecord(pVCpu); 7284 7287 if (pEvtRec) 7285 7288 { 7286 7289 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 7287 pEvtRec->u.RamWrite.GCPhys = p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond;7288 pEvtRec->u.RamWrite.cb = p IemCpu->aMemBbMappings[iMemMap].cbSecond;7290 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond; 7291 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond; 7289 7292 memcpy(pEvtRec->u.RamWrite.ab, 7290 &p IemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],7291 p IemCpu->aMemBbMappings[iMemMap].cbSecond);7292 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;7293 *p IemCpu->ppIemEvtRecNext = pEvtRec;7293 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst], 7294 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond); 7295 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 7296 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 7294 7297 } 7295 7298 } … … 7297 7300 #endif 7298 7301 #if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES) 7299 Log(("IEM Wrote %RGp: %.*Rhxs\n", p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst,7300 RT_MAX(RT_MIN(p IemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));7301 if (p IemCpu->aMemBbMappings[iMemMap].cbSecond)7302 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond,7303 RT_MIN(p IemCpu->aMemBbMappings[iMemMap].cbSecond, 64),7304 &p IemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst]));7305 7306 size_t cbWrote = p IemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;7302 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, 7303 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0])); 7304 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond) 7305 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, 7306 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64), 7307 
&pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst])); 7308 7309 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond; 7307 7310 g_cbIemWrote = cbWrote; 7308 memcpy(g_abIemWrote, &p IemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));7311 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote))); 7309 7312 #endif 7310 7313 … … 7312 7315 * Free the mapping entry. 7313 7316 */ 7314 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7315 Assert(p IemCpu->cActiveMappings != 0);7316 p IemCpu->cActiveMappings--;7317 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 7318 Assert(pVCpu->iem.s.cActiveMappings != 0); 7319 pVCpu->iem.s.cActiveMappings--; 7317 7320 return VINF_SUCCESS; 7318 7321 } … … 7323 7326 */ 7324 7327 IEM_STATIC VBOXSTRICTRC 7325 iemMemBounceBufferMapCrossPage(P IEMCPU pIemCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)7328 iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) 7326 7329 { 7327 7330 /* … … 7329 7332 */ 7330 7333 RTGCPHYS GCPhysFirst; 7331 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, GCPtrFirst, fAccess, &GCPhysFirst);7334 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst); 7332 7335 if (rcStrict != VINF_SUCCESS) 7333 7336 return rcStrict; 7334 7337 7335 7338 RTGCPHYS GCPhysSecond; 7336 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,7339 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK, 7337 7340 fAccess, &GCPhysSecond); 7338 7341 if (rcStrict != VINF_SUCCESS) … … 7340 7343 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK; 7341 7344 7342 PVM pVM = IEMCPU_TO_VM(pIemCpu);7345 PVM pVM = pVCpu->CTX_SUFF(pVM); 7343 7346 #ifdef IEM_VERIFICATION_MODE_FULL 7344 7347 /* … … 7346 7349 * the right execution engine. (TLB: Redo this.) 7347 7350 */ 7348 if (IEM_FULL_VERIFICATION_ENABLED(p IemCpu))7349 { 7350 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), p IemCpu->fBypassHandlers);7351 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 7352 { 7353 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers); 7351 7354 if (RT_SUCCESS(rc2)) 7352 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), p IemCpu->fBypassHandlers);7355 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers); 7353 7356 if (RT_FAILURE(rc2)) 7354 p IemCpu->fProblematicMemory = true;7357 pVCpu->iem.s.fProblematicMemory = true; 7355 7358 } 7356 7359 #endif … … 7361 7364 * write access. 
7362 7365 */ 7363 uint8_t *pbBuf = &p IemCpu->aBounceBuffers[iMemMap].ab[0];7366 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]; 7364 7367 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK); 7365 7368 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage); … … 7367 7370 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE)) 7368 7371 { 7369 if (!p IemCpu->fBypassHandlers)7372 if (!pVCpu->iem.s.fBypassHandlers) 7370 7373 { 7371 7374 /* … … 7380 7383 { /*likely */ } 7381 7384 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict)) 7382 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7385 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7383 7386 else 7384 7387 { … … 7394 7397 { 7395 7398 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2); 7396 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7399 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7397 7400 } 7398 7401 else … … 7436 7439 7437 7440 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) 7438 if ( !p IemCpu->fNoRem7441 if ( !pVCpu->iem.s.fNoRem 7439 7442 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) ) 7440 7443 { … … 7442 7445 * Record the reads. 7443 7446 */ 7444 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(p IemCpu);7447 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 7445 7448 if (pEvtRec) 7446 7449 { … … 7448 7451 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 7449 7452 pEvtRec->u.RamRead.cb = cbFirstPage; 7450 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;7451 *p IemCpu->ppIemEvtRecNext = pEvtRec;7453 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 7454 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 7452 7455 } 7453 pEvtRec = iemVerifyAllocRecord(p IemCpu);7456 pEvtRec = iemVerifyAllocRecord(pVCpu); 7454 7457 if (pEvtRec) 7455 7458 { … … 7457 7460 pEvtRec->u.RamRead.GCPhys = GCPhysSecond; 7458 7461 pEvtRec->u.RamRead.cb = cbSecondPage; 7459 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;7460 *p IemCpu->ppIemEvtRecNext = pEvtRec;7462 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 7463 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 7461 7464 } 7462 7465 } … … 7466 7469 else 7467 7470 memset(pbBuf, 0xcc, cbMem); 7468 if (cbMem < sizeof(p IemCpu->aBounceBuffers[iMemMap].ab))7469 memset(pbBuf + cbMem, 0xaa, sizeof(p IemCpu->aBounceBuffers[iMemMap].ab) - cbMem);7471 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab)) 7472 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem); 7470 7473 #endif 7471 7474 … … 7473 7476 * Commit the bounce buffer entry. 
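The chunk sizes used by iemMemBounceBufferMapCrossPage follow directly from the page geometry: the first chunk runs to the end of the page the access starts in, the second chunk is whatever remains and lives at the start of the page holding the last byte. Restated on its own (assuming the usual 4 KiB pages behind PAGE_SIZE/PAGE_OFFSET_MASK):

#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SIZE        0x1000u
#define MY_PAGE_OFFSET_MASK 0x0fffu

typedef struct CROSSPAGESPLIT
{
    uint64_t GCPtrSecondPage;  /* page-aligned address of the second page */
    uint32_t cbFirst;          /* bytes taken from the first page  */
    uint32_t cbSecond;         /* bytes taken from the second page */
} CROSSPAGESPLIT;

/* Only meaningful when the access really crosses a page boundary. */
static CROSSPAGESPLIT splitCrossPage(uint64_t GCPtrFirst, size_t cbMem)
{
    CROSSPAGESPLIT Split;
    Split.cbFirst         = MY_PAGE_SIZE - (uint32_t)(GCPtrFirst & MY_PAGE_OFFSET_MASK);
    Split.cbSecond        = (uint32_t)(cbMem - Split.cbFirst);
    Split.GCPtrSecondPage = (GCPtrFirst + (cbMem - 1)) & ~(uint64_t)MY_PAGE_OFFSET_MASK;
    return Split;
}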
7474 7477 */ 7475 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;7476 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;7477 p IemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;7478 p IemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;7479 p IemCpu->aMemBbMappings[iMemMap].fUnassigned = false;7480 p IemCpu->aMemMappings[iMemMap].pv = pbBuf;7481 p IemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;7482 p IemCpu->iNextMapping = iMemMap + 1;7483 p IemCpu->cActiveMappings++;7484 7485 iemMemUpdateWrittenCounter(p IemCpu, fAccess, cbMem);7478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst; 7479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond; 7480 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage; 7481 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage; 7482 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false; 7483 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf; 7484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED; 7485 pVCpu->iem.s.iNextMapping = iMemMap + 1; 7486 pVCpu->iem.s.cActiveMappings++; 7487 7488 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 7486 7489 *ppvMem = pbBuf; 7487 7490 return VINF_SUCCESS; … … 7492 7495 * iemMemMap woker that deals with iemMemPageMap failures. 7493 7496 */ 7494 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(P IEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,7497 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem, 7495 7498 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) 7496 7499 { … … 7505 7508 return rcMap; 7506 7509 } 7507 p IemCpu->cPotentialExits++;7510 pVCpu->iem.s.cPotentialExits++; 7508 7511 7509 7512 /* … … 7511 7514 * write access. 7512 7515 */ 7513 uint8_t *pbBuf = &p IemCpu->aBounceBuffers[iMemMap].ab[0];7516 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]; 7514 7517 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE)) 7515 7518 { … … 7519 7522 { 7520 7523 int rc; 7521 if (!p IemCpu->fBypassHandlers)7524 if (!pVCpu->iem.s.fBypassHandlers) 7522 7525 { 7523 VBOXSTRICTRC rcStrict = PGMPhysRead( IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);7526 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM); 7524 7527 if (rcStrict == VINF_SUCCESS) 7525 7528 { /* nothing */ } 7526 7529 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict)) 7527 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);7530 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 7528 7531 else 7529 7532 { … … 7535 7538 else 7536 7539 { 7537 rc = PGMPhysSimpleReadGCPhys( IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);7540 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem); 7538 7541 if (RT_SUCCESS(rc)) 7539 7542 { /* likely */ } … … 7548 7551 7549 7552 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) 7550 if ( !p IemCpu->fNoRem7553 if ( !pVCpu->iem.s.fNoRem 7551 7554 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) ) 7552 7555 { … … 7554 7557 * Record the read. 
7555 7558 */ 7556 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(p IemCpu);7559 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 7557 7560 if (pEvtRec) 7558 7561 { … … 7560 7563 pEvtRec->u.RamRead.GCPhys = GCPhysFirst; 7561 7564 pEvtRec->u.RamRead.cb = (uint32_t)cbMem; 7562 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;7563 *p IemCpu->ppIemEvtRecNext = pEvtRec;7565 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 7566 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 7564 7567 } 7565 7568 } … … 7571 7574 #endif 7572 7575 #ifdef VBOX_STRICT 7573 if (cbMem < sizeof(p IemCpu->aBounceBuffers[iMemMap].ab))7574 memset(pbBuf + cbMem, 0xaa, sizeof(p IemCpu->aBounceBuffers[iMemMap].ab) - cbMem);7576 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab)) 7577 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem); 7575 7578 #endif 7576 7579 … … 7578 7581 * Commit the bounce buffer entry. 7579 7582 */ 7580 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;7581 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;7582 p IemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;7583 p IemCpu->aMemBbMappings[iMemMap].cbSecond = 0;7584 p IemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;7585 p IemCpu->aMemMappings[iMemMap].pv = pbBuf;7586 p IemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;7587 p IemCpu->iNextMapping = iMemMap + 1;7588 p IemCpu->cActiveMappings++;7589 7590 iemMemUpdateWrittenCounter(p IemCpu, fAccess, cbMem);7583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst; 7584 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS; 7585 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem; 7586 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0; 7587 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED; 7588 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf; 7589 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED; 7590 pVCpu->iem.s.iNextMapping = iMemMap + 1; 7591 pVCpu->iem.s.cActiveMappings++; 7592 7593 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 7591 7594 *ppvMem = pbBuf; 7592 7595 return VINF_SUCCESS; … … 7607 7610 * @returns VBox strict status code. 7608 7611 * 7609 * @param p IemCpu The IEM per CPU data.7612 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7610 7613 * @param ppvMem Where to return the pointer to the mapped 7611 7614 * memory. … … 7625 7628 */ 7626 7629 IEM_STATIC VBOXSTRICTRC 7627 iemMemMap(P IEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)7630 iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) 7628 7631 { 7629 7632 /* … … 7632 7635 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! 
*/ 7633 7636 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); 7634 Assert(p IemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));7635 7636 unsigned iMemMap = p IemCpu->iNextMapping;7637 if ( iMemMap >= RT_ELEMENTS(p IemCpu->aMemMappings)7638 || p IemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)7639 { 7640 iMemMap = iemMemMapFindFree(p IemCpu);7641 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(p IemCpu->aMemMappings),7642 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", p IemCpu->cActiveMappings,7643 p IemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,7644 p IemCpu->aMemMappings[2].fAccess),7637 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)); 7638 7639 unsigned iMemMap = pVCpu->iem.s.iNextMapping; 7640 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 7641 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID) 7642 { 7643 iMemMap = iemMemMapFindFree(pVCpu); 7644 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 7645 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings, 7646 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, 7647 pVCpu->iem.s.aMemMappings[2].fAccess), 7645 7648 VERR_IEM_IPE_9); 7646 7649 } … … 7650 7653 * slightly complicated happens, fall back on bounce buffering. 7651 7654 */ 7652 VBOXSTRICTRC rcStrict = iemMemApplySegment(p IemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);7655 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem); 7653 7656 if (rcStrict != VINF_SUCCESS) 7654 7657 return rcStrict; 7655 7658 7656 7659 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */ 7657 return iemMemBounceBufferMapCrossPage(p IemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);7660 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess); 7658 7661 7659 7662 RTGCPHYS GCPhysFirst; 7660 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, GCPtrMem, fAccess, &GCPhysFirst);7663 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst); 7661 7664 if (rcStrict != VINF_SUCCESS) 7662 7665 return rcStrict; … … 7668 7671 7669 7672 void *pvMem; 7670 rcStrict = iemMemPageMap(p IemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);7673 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7671 7674 if (rcStrict != VINF_SUCCESS) 7672 return iemMemBounceBufferMapPhys(p IemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);7675 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict); 7673 7676 7674 7677 /* 7675 7678 * Fill in the mapping table entry. 7676 7679 */ 7677 p IemCpu->aMemMappings[iMemMap].pv = pvMem;7678 p IemCpu->aMemMappings[iMemMap].fAccess = fAccess;7679 p IemCpu->iNextMapping = iMemMap + 1;7680 p IemCpu->cActiveMappings++;7681 7682 iemMemUpdateWrittenCounter(p IemCpu, fAccess, cbMem);7680 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem; 7681 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess; 7682 pVCpu->iem.s.iNextMapping = iMemMap + 1; 7683 pVCpu->iem.s.cActiveMappings++; 7684 7685 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 7683 7686 *ppvMem = pvMem; 7684 7687 return VINF_SUCCESS; … … 7690 7693 * 7691 7694 * @returns Strict VBox status code. 
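The choice between the direct mapping and the cross-page bounce path in iemMemMap is a single predicate on the page offset; spelled out on its own (again assuming 4 KiB pages):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SIZE        0x1000u
#define MY_PAGE_OFFSET_MASK 0x0fffu

/* True when an access of cbMem bytes at GCPtrMem spills out of the page it
   starts in and therefore has to go through the two-page bounce buffer. */
static bool accessCrossesPage(uint64_t GCPtrMem, size_t cbMem)
{
    return (GCPtrMem & MY_PAGE_OFFSET_MASK) + cbMem > MY_PAGE_SIZE;
}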
7692 * @param p IemCpu The IEM per CPU data.7695 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7693 7696 * @param pvMem The mapping. 7694 7697 * @param fAccess The kind of access. 7695 7698 */ 7696 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(P IEMCPU pIemCpu, void *pvMem, uint32_t fAccess)7697 { 7698 int iMemMap = iemMapLookup(p IemCpu, pvMem, fAccess);7699 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess) 7700 { 7701 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 7699 7702 AssertReturn(iMemMap >= 0, iMemMap); 7700 7703 7701 7704 /* If it's bounce buffered, we may need to write back the buffer. */ 7702 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7703 { 7704 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7705 return iemMemBounceBufferCommitAndUnmap(p IemCpu, iMemMap, false /*fPostponeFail*/);7705 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED) 7706 { 7707 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE) 7708 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/); 7706 7709 } 7707 7710 /* Otherwise unlock it. */ 7708 7711 else 7709 PGMPhysReleasePageMappingLock( IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);7712 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7710 7713 7711 7714 /* Free the entry. */ 7712 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7713 Assert(p IemCpu->cActiveMappings != 0);7714 p IemCpu->cActiveMappings--;7715 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 7716 Assert(pVCpu->iem.s.cActiveMappings != 0); 7717 pVCpu->iem.s.cActiveMappings--; 7715 7718 return VINF_SUCCESS; 7716 7719 } … … 7731 7734 * @returns Pointer to the mapped memory. 7732 7735 * 7733 * @param p IemCpu The IEM per CPU data.7736 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7734 7737 * @param cbMem The number of bytes to map. This is usually 1, 7735 7738 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by … … 7746 7749 * exceptions. 7747 7750 */ 7748 IEM_STATIC void *iemMemMapJmp(P IEMCPU pIemCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)7751 IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) 7749 7752 { 7750 7753 /* … … 7753 7756 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! 
*/ 7754 7757 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); 7755 Assert(p IemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings));7756 7757 unsigned iMemMap = p IemCpu->iNextMapping;7758 if ( iMemMap >= RT_ELEMENTS(p IemCpu->aMemMappings)7759 || p IemCpu->aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)7760 { 7761 iMemMap = iemMemMapFindFree(p IemCpu);7762 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(p IemCpu->aMemMappings),7763 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", p IemCpu->cActiveMappings,7764 p IemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess,7765 p IemCpu->aMemMappings[2].fAccess),7766 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));7758 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)); 7759 7760 unsigned iMemMap = pVCpu->iem.s.iNextMapping; 7761 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 7762 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID) 7763 { 7764 iMemMap = iemMemMapFindFree(pVCpu); 7765 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 7766 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings, 7767 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, 7768 pVCpu->iem.s.aMemMappings[2].fAccess), 7769 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9)); 7767 7770 } 7768 7771 … … 7771 7774 * slightly complicated happens, fall back on bounce buffering. 7772 7775 */ 7773 VBOXSTRICTRC rcStrict = iemMemApplySegment(p IemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);7776 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem); 7774 7777 if (rcStrict == VINF_SUCCESS) { /*likely*/ } 7775 else longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));7778 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7776 7779 7777 7780 /* Crossing a page boundary? 
*/ … … 7781 7784 { 7782 7785 void *pvMem; 7783 VBOXSTRICTRC rcStrict = iemMemBounceBufferMapCrossPage(p IemCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);7786 VBOXSTRICTRC rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess); 7784 7787 if (rcStrict == VINF_SUCCESS) 7785 7788 return pvMem; 7786 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));7789 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7787 7790 } 7788 7791 7789 7792 RTGCPHYS GCPhysFirst; 7790 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, GCPtrMem, fAccess, &GCPhysFirst);7793 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst); 7791 7794 if (rcStrict == VINF_SUCCESS) { /*likely*/ } 7792 else longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));7795 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7793 7796 7794 7797 if (fAccess & IEM_ACCESS_TYPE_WRITE) … … 7798 7801 7799 7802 void *pvMem; 7800 rcStrict = iemMemPageMap(p IemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);7803 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7801 7804 if (rcStrict == VINF_SUCCESS) 7802 7805 { /* likely */ } … … 7804 7807 { 7805 7808 void *pvMem; 7806 rcStrict = iemMemBounceBufferMapPhys(p IemCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);7809 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict); 7807 7810 if (rcStrict == VINF_SUCCESS) 7808 7811 return pvMem; 7809 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));7812 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7810 7813 } 7811 7814 … … 7813 7816 * Fill in the mapping table entry. 7814 7817 */ 7815 p IemCpu->aMemMappings[iMemMap].pv = pvMem;7816 p IemCpu->aMemMappings[iMemMap].fAccess = fAccess;7817 p IemCpu->iNextMapping = iMemMap + 1;7818 p IemCpu->cActiveMappings++;7819 7820 iemMemUpdateWrittenCounter(p IemCpu, fAccess, cbMem);7818 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem; 7819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess; 7820 pVCpu->iem.s.iNextMapping = iMemMap + 1; 7821 pVCpu->iem.s.cActiveMappings++; 7822 7823 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 7821 7824 return pvMem; 7822 7825 } … … 7826 7829 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error. 7827 7830 * 7828 * @param p IemCpu The IEM per CPU data.7831 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7829 7832 * @param pvMem The mapping. 7830 7833 * @param fAccess The kind of access. 7831 7834 */ 7832 IEM_STATIC void iemMemCommitAndUnmapJmp(P IEMCPU pIemCpu, void *pvMem, uint32_t fAccess)7833 { 7834 int iMemMap = iemMapLookup(p IemCpu, pvMem, fAccess);7835 AssertStmt(iMemMap >= 0, longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), iMemMap));7835 IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess) 7836 { 7837 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 7838 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap)); 7836 7839 7837 7840 /* If it's bounce buffered, we may need to write back the buffer. 
*/ 7838 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7839 { 7840 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7841 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED) 7842 { 7843 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE) 7841 7844 { 7842 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(p IemCpu, iMemMap, false /*fPostponeFail*/);7845 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/); 7843 7846 if (rcStrict == VINF_SUCCESS) 7844 7847 return; 7845 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));7848 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7846 7849 } 7847 7850 } 7848 7851 /* Otherwise unlock it. */ 7849 7852 else 7850 PGMPhysReleasePageMappingLock( IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);7853 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7851 7854 7852 7855 /* Free the entry. */ 7853 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7854 Assert(p IemCpu->cActiveMappings != 0);7855 p IemCpu->cActiveMappings--;7856 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 7857 Assert(pVCpu->iem.s.cActiveMappings != 0); 7858 pVCpu->iem.s.cActiveMappings--; 7856 7859 } 7857 7860 … … 7868 7871 * @returns VBox status code (no strict statuses). Caller must check 7869 7872 * VMCPU_FF_IEM before repeating string instructions and similar stuff. 7870 * @param p IemCpu The IEM per CPU data.7873 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7871 7874 * @param pvMem The mapping. 7872 7875 * @param fAccess The kind of access. 7873 7876 */ 7874 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(P IEMCPU pIemCpu, void *pvMem, uint32_t fAccess)7875 { 7876 int iMemMap = iemMapLookup(p IemCpu, pvMem, fAccess);7877 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess) 7878 { 7879 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 7877 7880 AssertReturn(iMemMap >= 0, iMemMap); 7878 7881 7879 7882 /* If it's bounce buffered, we may need to write back the buffer. */ 7880 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7881 { 7882 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7883 return iemMemBounceBufferCommitAndUnmap(p IemCpu, iMemMap, true /*fPostponeFail*/);7883 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED) 7884 { 7885 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE) 7886 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/); 7884 7887 } 7885 7888 /* Otherwise unlock it. */ 7886 7889 else 7887 PGMPhysReleasePageMappingLock( IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);7890 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7888 7891 7889 7892 /* Free the entry. */ 7890 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7891 Assert(p IemCpu->cActiveMappings != 0);7892 p IemCpu->cActiveMappings--;7893 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 7894 Assert(pVCpu->iem.s.cActiveMappings != 0); 7895 pVCpu->iem.s.cActiveMappings--; 7893 7896 return VINF_SUCCESS; 7894 7897 } … … 7902 7905 * 7903 7906 * @returns Strict VBox status code to pass up. 
7904 * @param p IemCpu The IEM per CPU data.7905 */ 7906 IEM_STATIC void iemMemRollback(P IEMCPU pIemCpu)7907 { 7908 Assert(p IemCpu->cActiveMappings > 0);7909 7910 uint32_t iMemMap = RT_ELEMENTS(p IemCpu->aMemMappings);7907 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7908 */ 7909 IEM_STATIC void iemMemRollback(PVMCPU pVCpu) 7910 { 7911 Assert(pVCpu->iem.s.cActiveMappings > 0); 7912 7913 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings); 7911 7914 while (iMemMap-- > 0) 7912 7915 { 7913 uint32_t fAccess = p IemCpu->aMemMappings[iMemMap].fAccess;7916 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess; 7914 7917 if (fAccess != IEM_ACCESS_INVALID) 7915 7918 { 7916 7919 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess)); 7917 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7920 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 7918 7921 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED)) 7919 PGMPhysReleasePageMappingLock( IEMCPU_TO_VM(pIemCpu), &pIemCpu->aMemMappingLocks[iMemMap].Lock);7920 Assert(p IemCpu->cActiveMappings > 0);7921 p IemCpu->cActiveMappings--;7922 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 7923 Assert(pVCpu->iem.s.cActiveMappings > 0); 7924 pVCpu->iem.s.cActiveMappings--; 7922 7925 } 7923 7926 } … … 7929 7932 * 7930 7933 * @returns Strict VBox status code. 7931 * @param p IemCpu The IEM per CPU data.7934 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7932 7935 * @param pu8Dst Where to return the byte. 7933 7936 * @param iSegReg The index of the segment register to use for … … 7935 7938 * @param GCPtrMem The address of the guest memory. 7936 7939 */ 7937 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(P IEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)7940 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 7938 7941 { 7939 7942 /* The lazy approach for now... */ 7940 7943 uint8_t const *pu8Src; 7941 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);7944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7942 7945 if (rc == VINF_SUCCESS) 7943 7946 { 7944 7947 *pu8Dst = *pu8Src; 7945 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);7948 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R); 7946 7949 } 7947 7950 return rc; … … 7954 7957 * 7955 7958 * @returns The byte. 7956 * @param p IemCpu The IEM per CPU data.7959 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7957 7960 * @param iSegReg The index of the segment register to use for 7958 7961 * this access. The base and limits are checked. 7959 7962 * @param GCPtrMem The address of the guest memory. 7960 7963 */ 7961 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)7964 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 7962 7965 { 7963 7966 /* The lazy approach for now... 
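iemMemRollback above takes the exception-unwind route: every still-active slot is walked, the PGM mapping lock is released for directly mapped entries (bounce buffers hold no lock), and the slot is marked free without committing anything. The same loop with simplified, hypothetical types:

#include <stddef.h>
#include <stdint.h>

#define MY_ACCESS_FREE     UINT32_C(0xffffffff)   /* free-slot sentinel for the sketch   */
#define MY_ACCESS_BOUNCED  UINT32_C(0x8000)       /* stand-in for the bounce-buffer flag */

typedef struct MYROLLBACKSLOT
{
    uint32_t fAccess;
    void    *pvLock;   /* whatever handle the unlock callback needs */
} MYROLLBACKSLOT;

typedef void FNUNLOCK(void *pvLock);   /* hypothetical lock-release callback */

/* Drop all active mappings without writing anything back. */
static void rollbackMappings(MYROLLBACKSLOT *paSlots, size_t cSlots, FNUNLOCK *pfnUnlock)
{
    for (size_t i = cSlots; i-- > 0; )
        if (paSlots[i].fAccess != MY_ACCESS_FREE)
        {
            if (!(paSlots[i].fAccess & MY_ACCESS_BOUNCED))
                pfnUnlock(paSlots[i].pvLock);
            paSlots[i].fAccess = MY_ACCESS_FREE;
        }
}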
*/ 7964 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);7967 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7965 7968 uint8_t const bRet = *pu8Src; 7966 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);7969 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R); 7967 7970 return bRet; 7968 7971 } … … 7974 7977 * 7975 7978 * @returns Strict VBox status code. 7976 * @param p IemCpu The IEM per CPU data.7979 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7977 7980 * @param pu16Dst Where to return the word. 7978 7981 * @param iSegReg The index of the segment register to use for … … 7980 7983 * @param GCPtrMem The address of the guest memory. 7981 7984 */ 7982 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(P IEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)7985 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 7983 7986 { 7984 7987 /* The lazy approach for now... */ 7985 7988 uint16_t const *pu16Src; 7986 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);7989 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7987 7990 if (rc == VINF_SUCCESS) 7988 7991 { 7989 7992 *pu16Dst = *pu16Src; 7990 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);7993 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R); 7991 7994 } 7992 7995 return rc; … … 7999 8002 * 8000 8003 * @returns The word 8001 * @param p IemCpu The IEM per CPU data.8004 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8002 8005 * @param iSegReg The index of the segment register to use for 8003 8006 * this access. The base and limits are checked. 8004 8007 * @param GCPtrMem The address of the guest memory. 8005 8008 */ 8006 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)8009 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 8007 8010 { 8008 8011 /* The lazy approach for now... */ 8009 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8012 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8010 8013 uint16_t const u16Ret = *pu16Src; 8011 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);8014 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R); 8012 8015 return u16Ret; 8013 8016 } … … 8019 8022 * 8020 8023 * @returns Strict VBox status code. 8021 * @param p IemCpu The IEM per CPU data.8024 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8022 8025 * @param pu32Dst Where to return the dword. 8023 8026 * @param iSegReg The index of the segment register to use for … … 8025 8028 * @param GCPtrMem The address of the guest memory. 8026 8029 */ 8027 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8030 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8028 8031 { 8029 8032 /* The lazy approach for now... 
*/ 8030 8033 uint32_t const *pu32Src; 8031 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8034 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8032 8035 if (rc == VINF_SUCCESS) 8033 8036 { 8034 8037 *pu32Dst = *pu32Src; 8035 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);8038 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R); 8036 8039 } 8037 8040 return rc; … … 8041 8044 #ifdef IEM_WITH_SETJMP 8042 8045 8043 IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(P IEMCPU pIemCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)8046 IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem) 8044 8047 { 8045 8048 Assert(cbMem >= 1); … … 8049 8052 * 64-bit mode is simpler. 8050 8053 */ 8051 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8054 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8052 8055 { 8053 8056 if (iSegReg >= X86_SREG_FS) 8054 8057 { 8055 PCPUMSELREGHID pSel = iemSRegGetHid(p IemCpu, iSegReg);8058 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 8056 8059 GCPtrMem += pSel->u64Base; 8057 8060 } … … 8065 8068 else 8066 8069 { 8067 PCPUMSELREGHID pSel = iemSRegGetHid(p IemCpu, iSegReg);8070 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 8068 8071 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN)) 8069 8072 == X86DESCATTR_P /* data, expand up */ … … 8088 8091 } 8089 8092 else 8090 iemRaiseSelectorInvalidAccessJmp(p IemCpu, iSegReg, IEM_ACCESS_DATA_R);8091 iemRaiseSelectorBoundsJmp(p IemCpu, iSegReg, IEM_ACCESS_DATA_R);8092 } 8093 iemRaiseGeneralProtectionFault0Jmp(p IemCpu);8093 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R); 8094 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R); 8095 } 8096 iemRaiseGeneralProtectionFault0Jmp(pVCpu); 8094 8097 } 8095 8098 … … 8099 8102 * 8100 8103 * @returns The dword 8101 * @param p IemCpu The IEM per CPU data.8104 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8102 8105 * @param iSegReg The index of the segment register to use for 8103 8106 * this access. The base and limits are checked. 8104 8107 * @param GCPtrMem The address of the guest memory. 8105 8108 */ 8106 IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)8107 { 8108 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8109 IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 8110 { 8111 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8109 8112 uint32_t const u32Ret = *pu32Src; 8110 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);8113 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R); 8111 8114 return u32Ret; 8112 8115 } … … 8117 8120 * 8118 8121 * @returns The dword 8119 * @param p IemCpu The IEM per CPU data.8122 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8120 8123 * @param iSegReg The index of the segment register to use for 8121 8124 * this access. The base and limits are checked. 8122 8125 * @param GCPtrMem The address of the guest memory. 
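The single attribute compare in the fast path of iemMemApplySegmentToReadJmp answers four questions at once: present, not unusable, a data segment, and expand-up. With the bits spelled out (the values mirror the descriptor attribute layout but serve only as placeholders for this sketch):

#include <stdbool.h>
#include <stdint.h>

#define ATTR_P        (1u << 7)    /* present                                   */
#define ATTR_UNUSABLE (1u << 16)   /* register loaded with an unusable selector */
#define ATTR_CODE     (1u << 3)    /* type bit 3: code segment                  */
#define ATTR_DOWN     (1u << 2)    /* type bit 2: expand-down data segment      */

/* "Present, usable, expand-up data segment?" - anything else must take the
   slower path that can raise the bounds or invalid-access faults. */
static bool isPlainExpandUpDataSegment(uint32_t fAttr)
{
    return (fAttr & (ATTR_P | ATTR_UNUSABLE | ATTR_CODE | ATTR_DOWN)) == ATTR_P;
}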
8123 8126 */ 8124 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)8127 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 8125 8128 { 8126 8129 # ifdef IEM_WITH_DATA_TLB 8127 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(p IemCpu, iSegReg, sizeof(uint32_t), GCPtrMem);8130 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem); 8128 8131 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t))) 8129 8132 { … … 8131 8134 } 8132 8135 8133 return iemMemFetchDataU32SafeJmp(p IemCpu, iSegReg, GCPtrMem);8136 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem); 8134 8137 # else 8135 8138 /* The lazy approach. */ 8136 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8139 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8137 8140 uint32_t const u32Ret = *pu32Src; 8138 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);8141 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R); 8139 8142 return u32Ret; 8140 8143 # endif … … 8148 8151 * 8149 8152 * @returns Strict VBox status code. 8150 * @param p IemCpu The IEM per CPU data.8153 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8151 8154 * @param pu64Dst Where to return the sign extended value. 8152 8155 * @param iSegReg The index of the segment register to use for … … 8154 8157 * @param GCPtrMem The address of the guest memory. 8155 8158 */ 8156 IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8159 IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8157 8160 { 8158 8161 /* The lazy approach for now... */ 8159 8162 int32_t const *pi32Src; 8160 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8163 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8161 8164 if (rc == VINF_SUCCESS) 8162 8165 { 8163 8166 *pu64Dst = *pi32Src; 8164 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);8167 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R); 8165 8168 } 8166 8169 #ifdef __GNUC__ /* warning: GCC may be a royal pain */ … … 8177 8180 * 8178 8181 * @returns Strict VBox status code. 8179 * @param p IemCpu The IEM per CPU data.8182 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8180 8183 * @param pu64Dst Where to return the qword. 8181 8184 * @param iSegReg The index of the segment register to use for … … 8183 8186 * @param GCPtrMem The address of the guest memory. 8184 8187 */ 8185 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8188 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8186 8189 { 8187 8190 /* The lazy approach for now... 
*/ 8188 8191 uint64_t const *pu64Src; 8189 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8190 8193 if (rc == VINF_SUCCESS) 8191 8194 { 8192 8195 *pu64Dst = *pu64Src; 8193 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);8196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 8194 8197 } 8195 8198 return rc; … … 8202 8205 * 8203 8206 * @returns The qword. 8204 * @param p IemCpu The IEM per CPU data.8207 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8205 8208 * @param iSegReg The index of the segment register to use for 8206 8209 * this access. The base and limits are checked. 8207 8210 * @param GCPtrMem The address of the guest memory. 8208 8211 */ 8209 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)8212 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 8210 8213 { 8211 8214 /* The lazy approach for now... */ 8212 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8215 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8213 8216 uint64_t const u64Ret = *pu64Src; 8214 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);8217 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 8215 8218 return u64Ret; 8216 8219 } … … 8222 8225 * 8223 8226 * @returns Strict VBox status code. 8224 * @param p IemCpu The IEM per CPU data.8227 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8225 8228 * @param pu64Dst Where to return the qword. 8226 8229 * @param iSegReg The index of the segment register to use for … … 8228 8231 * @param GCPtrMem The address of the guest memory. 8229 8232 */ 8230 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8233 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8231 8234 { 8232 8235 /* The lazy approach for now... */ 8233 8236 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */ 8234 8237 if (RT_UNLIKELY(GCPtrMem & 15)) 8235 return iemRaiseGeneralProtectionFault0(p IemCpu);8238 return iemRaiseGeneralProtectionFault0(pVCpu); 8236 8239 8237 8240 uint64_t const *pu64Src; 8238 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8239 8242 if (rc == VINF_SUCCESS) 8240 8243 { 8241 8244 *pu64Dst = *pu64Src; 8242 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);8245 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 8243 8246 } 8244 8247 return rc; … … 8251 8254 * 8252 8255 * @returns The qword. 8253 * @param p IemCpu The IEM per CPU data.8256 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8254 8257 * @param iSegReg The index of the segment register to use for 8255 8258 * this access. The base and limits are checked. 8256 8259 * @param GCPtrMem The address of the guest memory. 
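 *
 * Remarks: both this and the status-code flavour above insist on a 16-byte
 * aligned address and raise \#GP(0) otherwise, i.e. the guard boils down to
 * the check already visible in the non-Jmp version:
 *
 *     if (RT_UNLIKELY(GCPtrMem & 15))
 *         return iemRaiseGeneralProtectionFault0(pVCpu);
 *
 * (the Jmp flavour converts that status into a longjmp instead of returning it).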
8257 8260 */ 8258 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)8261 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) 8259 8262 { 8260 8263 /* The lazy approach for now... */ … … 8262 8265 if (RT_LIKELY(!(GCPtrMem & 15))) 8263 8266 { 8264 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8267 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8265 8268 uint64_t const u64Ret = *pu64Src; 8266 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);8269 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 8267 8270 return u64Ret; 8268 8271 } 8269 8272 8270 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(p IemCpu);8271 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));8273 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu); 8274 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc)); 8272 8275 } 8273 8276 #endif … … 8278 8281 * 8279 8282 * @returns Strict VBox status code. 8280 * @param p IemCpu The IEM per CPU data.8283 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8281 8284 * @param pr80Dst Where to return the tword. 8282 8285 * @param iSegReg The index of the segment register to use for … … 8284 8287 * @param GCPtrMem The address of the guest memory. 8285 8288 */ 8286 IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(P IEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8289 IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8287 8290 { 8288 8291 /* The lazy approach for now... */ 8289 8292 PCRTFLOAT80U pr80Src; 8290 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8293 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8291 8294 if (rc == VINF_SUCCESS) 8292 8295 { 8293 8296 *pr80Dst = *pr80Src; 8294 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);8297 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R); 8295 8298 } 8296 8299 return rc; … … 8302 8305 * Fetches a data tword, longjmp on error. 8303 8306 * 8304 * @param p IemCpu The IEM per CPU data.8307 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8305 8308 * @param pr80Dst Where to return the tword. 8306 8309 * @param iSegReg The index of the segment register to use for … … 8308 8311 * @param GCPtrMem The address of the guest memory. 8309 8312 */ 8310 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(P IEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8313 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8311 8314 { 8312 8315 /* The lazy approach for now... 
*/ 8313 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(p IemCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8316 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8314 8317 *pr80Dst = *pr80Src; 8315 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);8318 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R); 8316 8319 } 8317 8320 #endif … … 8322 8325 * 8323 8326 * @returns Strict VBox status code. 8324 * @param p IemCpu The IEM per CPU data.8327 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8325 8328 * @param pu128Dst Where to return the qword. 8326 8329 * @param iSegReg The index of the segment register to use for … … 8328 8331 * @param GCPtrMem The address of the guest memory. 8329 8332 */ 8330 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(P IEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8333 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8331 8334 { 8332 8335 /* The lazy approach for now... */ 8333 8336 uint128_t const *pu128Src; 8334 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8337 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8335 8338 if (rc == VINF_SUCCESS) 8336 8339 { 8337 8340 *pu128Dst = *pu128Src; 8338 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);8341 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 8339 8342 } 8340 8343 return rc; … … 8346 8349 * Fetches a data dqword (double qword), generally SSE related. 8347 8350 * 8348 * @param p IemCpu The IEM per CPU data.8351 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8349 8352 * @param pu128Dst Where to return the qword. 8350 8353 * @param iSegReg The index of the segment register to use for … … 8352 8355 * @param GCPtrMem The address of the guest memory. 8353 8356 */ 8354 IEM_STATIC void iemMemFetchDataU128Jmp(P IEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8357 IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8355 8358 { 8356 8359 /* The lazy approach for now... */ 8357 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8360 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8358 8361 *pu128Dst = *pu128Src; 8359 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);8362 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 8360 8363 } 8361 8364 #endif … … 8369 8372 * 8370 8373 * @returns Strict VBox status code. 8371 * @param p IemCpu The IEM per CPU data.8374 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8372 8375 * @param pu128Dst Where to return the qword. 8373 8376 * @param iSegReg The index of the segment register to use for … … 8375 8378 * @param GCPtrMem The address of the guest memory. 
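 *
 * Remarks: the alignment \#GP(0) is only raised while MXCSR.MM (the
 * misaligned-SSE mode bit) is clear; with MM set the unaligned access is let
 * through.  The guard as it appears in this changeset:
 *
 *     if (   (GCPtrMem & 15)
 *         && !(pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM))
 *         return iemRaiseGeneralProtectionFault0(pVCpu);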
8376 8379 */ 8377 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(P IEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8380 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8378 8381 { 8379 8382 /* The lazy approach for now... */ 8380 8383 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */ 8381 8384 if ( (GCPtrMem & 15) 8382 && !(p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */8383 return iemRaiseGeneralProtectionFault0(p IemCpu);8385 && !(pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ 8386 return iemRaiseGeneralProtectionFault0(pVCpu); 8384 8387 8385 8388 uint128_t const *pu128Src; 8386 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);8389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 8387 8390 if (rc == VINF_SUCCESS) 8388 8391 { 8389 8392 *pu128Dst = *pu128Src; 8390 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);8393 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 8391 8394 } 8392 8395 return rc; … … 8401 8404 * Raises \#GP(0) if not aligned. 8402 8405 * 8403 * @param p IemCpu The IEM per CPU data.8406 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8404 8407 * @param pu128Dst Where to return the qword. 8405 8408 * @param iSegReg The index of the segment register to use for … … 8407 8410 * @param GCPtrMem The address of the guest memory. 8408 8411 */ 8409 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(P IEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)8412 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 8410 8413 { 8411 8414 /* The lazy approach for now... */ 8412 8415 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */ 8413 8416 if ( (GCPtrMem & 15) == 0 8414 || (p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */8415 { 8416 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(p IemCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,8417 || (pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ 8418 { 8419 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, 8417 8420 IEM_ACCESS_DATA_R); 8418 8421 *pu128Dst = *pu128Src; 8419 iemMemCommitAndUnmapJmp(p IemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);8422 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 8420 8423 return; 8421 8424 } 8422 8425 8423 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(p IemCpu);8424 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));8426 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 8427 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 8425 8428 } 8426 8429 #endif … … 8432 8435 * 8433 8436 * @returns Strict VBox status code. 
8434 * @param p IemCpu The IEM per CPU data.8437 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8435 8438 * @param pcbLimit Where to return the limit. 8436 8439 * @param pGCPtrBase Where to return the base. … … 8440 8443 * @param enmOpSize The effective operand size. 8441 8444 */ 8442 IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(P IEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,8445 IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg, 8443 8446 RTGCPTR GCPtrMem, IEMMODE enmOpSize) 8444 8447 { … … 8455 8458 */ 8456 8459 VBOXSTRICTRC rcStrict; 8457 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8458 { 8459 rcStrict = iemMemFetchDataU16(p IemCpu, pcbLimit, iSegReg, GCPtrMem);8460 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8461 { 8462 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem); 8460 8463 if (rcStrict == VINF_SUCCESS) 8461 rcStrict = iemMemFetchDataU64(p IemCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);8464 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2); 8462 8465 } 8463 8466 else … … 8466 8469 if (enmOpSize == IEMMODE_32BIT) 8467 8470 { 8468 if (IEM_GET_TARGET_CPU(p IemCpu) != IEMTARGETCPU_486)8471 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486) 8469 8472 { 8470 rcStrict = iemMemFetchDataU16(p IemCpu, pcbLimit, iSegReg, GCPtrMem);8473 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem); 8471 8474 if (rcStrict == VINF_SUCCESS) 8472 rcStrict = iemMemFetchDataU32(p IemCpu, &uTmp, iSegReg, GCPtrMem + 2);8475 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2); 8473 8476 } 8474 8477 else 8475 8478 { 8476 rcStrict = iemMemFetchDataU32(p IemCpu, &uTmp, iSegReg, GCPtrMem);8479 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem); 8477 8480 if (rcStrict == VINF_SUCCESS) 8478 8481 { 8479 8482 *pcbLimit = (uint16_t)uTmp; 8480 rcStrict = iemMemFetchDataU32(p IemCpu, &uTmp, iSegReg, GCPtrMem + 2);8483 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2); 8481 8484 } 8482 8485 } … … 8486 8489 else 8487 8490 { 8488 rcStrict = iemMemFetchDataU16(p IemCpu, pcbLimit, iSegReg, GCPtrMem);8491 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem); 8489 8492 if (rcStrict == VINF_SUCCESS) 8490 8493 { 8491 rcStrict = iemMemFetchDataU32(p IemCpu, &uTmp, iSegReg, GCPtrMem + 2);8494 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2); 8492 8495 if (rcStrict == VINF_SUCCESS) 8493 8496 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff); … … 8504 8507 * 8505 8508 * @returns Strict VBox status code. 8506 * @param p IemCpu The IEM per CPU data.8509 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8507 8510 * @param iSegReg The index of the segment register to use for 8508 8511 * this access. The base and limits are checked. … … 8510 8513 * @param u8Value The value to store. 8511 8514 */ 8512 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)8515 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) 8513 8516 { 8514 8517 /* The lazy approach for now... 
*/ 8515 8518 uint8_t *pu8Dst; 8516 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8519 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8517 8520 if (rc == VINF_SUCCESS) 8518 8521 { 8519 8522 *pu8Dst = u8Value; 8520 rc = iemMemCommitAndUnmap(p IemCpu, pu8Dst, IEM_ACCESS_DATA_W);8523 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W); 8521 8524 } 8522 8525 return rc; … … 8528 8531 * Stores a data byte, longjmp on error. 8529 8532 * 8530 * @param p IemCpu The IEM per CPU data.8533 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8531 8534 * @param iSegReg The index of the segment register to use for 8532 8535 * this access. The base and limits are checked. … … 8534 8537 * @param u8Value The value to store. 8535 8538 */ 8536 IEM_STATIC void iemMemStoreDataU8Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)8539 IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) 8537 8540 { 8538 8541 /* The lazy approach for now... */ 8539 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(p IemCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8542 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8540 8543 *pu8Dst = u8Value; 8541 iemMemCommitAndUnmapJmp(p IemCpu, pu8Dst, IEM_ACCESS_DATA_W);8544 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W); 8542 8545 } 8543 8546 #endif … … 8548 8551 * 8549 8552 * @returns Strict VBox status code. 8550 * @param p IemCpu The IEM per CPU data.8553 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8551 8554 * @param iSegReg The index of the segment register to use for 8552 8555 * this access. The base and limits are checked. … … 8554 8557 * @param u16Value The value to store. 8555 8558 */ 8556 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)8559 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) 8557 8560 { 8558 8561 /* The lazy approach for now... */ 8559 8562 uint16_t *pu16Dst; 8560 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8563 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8561 8564 if (rc == VINF_SUCCESS) 8562 8565 { 8563 8566 *pu16Dst = u16Value; 8564 rc = iemMemCommitAndUnmap(p IemCpu, pu16Dst, IEM_ACCESS_DATA_W);8567 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); 8565 8568 } 8566 8569 return rc; … … 8572 8575 * Stores a data word, longjmp on error. 8573 8576 * 8574 * @param p IemCpu The IEM per CPU data.8577 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8575 8578 * @param iSegReg The index of the segment register to use for 8576 8579 * this access. The base and limits are checked. … … 8578 8581 * @param u16Value The value to store. 8579 8582 */ 8580 IEM_STATIC void iemMemStoreDataU16Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)8583 IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) 8581 8584 { 8582 8585 /* The lazy approach for now... 
*/ 8583 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(p IemCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8586 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8584 8587 *pu16Dst = u16Value; 8585 iemMemCommitAndUnmapJmp(p IemCpu, pu16Dst, IEM_ACCESS_DATA_W);8588 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); 8586 8589 } 8587 8590 #endif … … 8592 8595 * 8593 8596 * @returns Strict VBox status code. 8594 * @param p IemCpu The IEM per CPU data.8597 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8595 8598 * @param iSegReg The index of the segment register to use for 8596 8599 * this access. The base and limits are checked. … … 8598 8601 * @param u32Value The value to store. 8599 8602 */ 8600 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)8603 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) 8601 8604 { 8602 8605 /* The lazy approach for now... */ 8603 8606 uint32_t *pu32Dst; 8604 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8605 8608 if (rc == VINF_SUCCESS) 8606 8609 { 8607 8610 *pu32Dst = u32Value; 8608 rc = iemMemCommitAndUnmap(p IemCpu, pu32Dst, IEM_ACCESS_DATA_W);8611 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W); 8609 8612 } 8610 8613 return rc; … … 8617 8620 * 8618 8621 * @returns Strict VBox status code. 8619 * @param p IemCpu The IEM per CPU data.8622 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8620 8623 * @param iSegReg The index of the segment register to use for 8621 8624 * this access. The base and limits are checked. … … 8623 8626 * @param u32Value The value to store. 8624 8627 */ 8625 IEM_STATIC void iemMemStoreDataU32Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)8628 IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) 8626 8629 { 8627 8630 /* The lazy approach for now... */ 8628 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(p IemCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8631 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8629 8632 *pu32Dst = u32Value; 8630 iemMemCommitAndUnmapJmp(p IemCpu, pu32Dst, IEM_ACCESS_DATA_W);8633 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W); 8631 8634 } 8632 8635 #endif … … 8637 8640 * 8638 8641 * @returns Strict VBox status code. 8639 * @param p IemCpu The IEM per CPU data.8642 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8640 8643 * @param iSegReg The index of the segment register to use for 8641 8644 * this access. The base and limits are checked. … … 8643 8646 * @param u64Value The value to store. 8644 8647 */ 8645 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)8648 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) 8646 8649 { 8647 8650 /* The lazy approach for now... 
*/ 8648 8651 uint64_t *pu64Dst; 8649 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8652 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8650 8653 if (rc == VINF_SUCCESS) 8651 8654 { 8652 8655 *pu64Dst = u64Value; 8653 rc = iemMemCommitAndUnmap(p IemCpu, pu64Dst, IEM_ACCESS_DATA_W);8656 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W); 8654 8657 } 8655 8658 return rc; … … 8661 8664 * Stores a data qword, longjmp on error. 8662 8665 * 8663 * @param p IemCpu The IEM per CPU data.8666 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8664 8667 * @param iSegReg The index of the segment register to use for 8665 8668 * this access. The base and limits are checked. … … 8667 8670 * @param u64Value The value to store. 8668 8671 */ 8669 IEM_STATIC void iemMemStoreDataU64Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)8672 IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) 8670 8673 { 8671 8674 /* The lazy approach for now... */ 8672 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(p IemCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8675 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8673 8676 *pu64Dst = u64Value; 8674 iemMemCommitAndUnmapJmp(p IemCpu, pu64Dst, IEM_ACCESS_DATA_W);8677 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W); 8675 8678 } 8676 8679 #endif … … 8681 8684 * 8682 8685 * @returns Strict VBox status code. 8683 * @param p IemCpu The IEM per CPU data.8686 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8684 8687 * @param iSegReg The index of the segment register to use for 8685 8688 * this access. The base and limits are checked. … … 8687 8690 * @param u128Value The value to store. 8688 8691 */ 8689 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)8692 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) 8690 8693 { 8691 8694 /* The lazy approach for now... */ 8692 8695 uint128_t *pu128Dst; 8693 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8696 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8694 8697 if (rc == VINF_SUCCESS) 8695 8698 { 8696 8699 *pu128Dst = u128Value; 8697 rc = iemMemCommitAndUnmap(p IemCpu, pu128Dst, IEM_ACCESS_DATA_W);8700 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 8698 8701 } 8699 8702 return rc; … … 8705 8708 * Stores a data dqword, longjmp on error. 8706 8709 * 8707 * @param p IemCpu The IEM per CPU data.8710 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8708 8711 * @param iSegReg The index of the segment register to use for 8709 8712 * this access. The base and limits are checked. … … 8711 8714 * @param u128Value The value to store. 8712 8715 */ 8713 IEM_STATIC void iemMemStoreDataU128Jmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)8716 IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) 8714 8717 { 8715 8718 /* The lazy approach for now... 
*/ 8716 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(p IemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8719 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8717 8720 *pu128Dst = u128Value; 8718 iemMemCommitAndUnmapJmp(p IemCpu, pu128Dst, IEM_ACCESS_DATA_W);8721 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 8719 8722 } 8720 8723 #endif … … 8725 8728 * 8726 8729 * @returns Strict VBox status code. 8727 * @param p IemCpu The IEM per CPU data.8730 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8728 8731 * @param iSegReg The index of the segment register to use for 8729 8732 * this access. The base and limits are checked. … … 8731 8734 * @param u128Value The value to store. 8732 8735 */ 8733 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)8736 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) 8734 8737 { 8735 8738 /* The lazy approach for now... */ 8736 8739 if ( (GCPtrMem & 15) 8737 && !(p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */8738 return iemRaiseGeneralProtectionFault0(p IemCpu);8740 && !(pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ 8741 return iemRaiseGeneralProtectionFault0(pVCpu); 8739 8742 8740 8743 uint128_t *pu128Dst; 8741 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8744 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8742 8745 if (rc == VINF_SUCCESS) 8743 8746 { 8744 8747 *pu128Dst = u128Value; 8745 rc = iemMemCommitAndUnmap(p IemCpu, pu128Dst, IEM_ACCESS_DATA_W);8748 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 8746 8749 } 8747 8750 return rc; … … 8754 8757 * 8755 8758 * @returns Strict VBox status code. 8756 * @param p IemCpu The IEM per CPU data.8759 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8757 8760 * @param iSegReg The index of the segment register to use for 8758 8761 * this access. The base and limits are checked. … … 8761 8764 */ 8762 8765 DECL_NO_INLINE(IEM_STATIC, void) 8763 iemMemStoreDataU128AlignedSseJmp(P IEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)8766 iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value) 8764 8767 { 8765 8768 /* The lazy approach for now... */ 8766 8769 if ( (GCPtrMem & 15) == 0 8767 || (p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */8768 { 8769 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(p IemCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);8770 || (pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. 
*/ 8771 { 8772 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 8770 8773 *pu128Dst = u128Value; 8771 iemMemCommitAndUnmapJmp(p IemCpu, pu128Dst, IEM_ACCESS_DATA_W);8774 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 8772 8775 return; 8773 8776 } 8774 8777 8775 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(p IemCpu);8776 longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));8778 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 8779 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 8777 8780 } 8778 8781 #endif … … 8783 8786 * 8784 8787 * @returns Strict VBox status code. 8785 * @param p IemCpu The IEM per CPU data.8788 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8786 8789 * @param cbLimit The limit. 8787 8790 * @param GCPtrBase The base address. … … 8791 8794 */ 8792 8795 IEM_STATIC VBOXSTRICTRC 8793 iemMemStoreDataXdtr(P IEMCPU pIemCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)8796 iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) 8794 8797 { 8795 8798 /* … … 8797 8800 * independent writes. The instructions does not respond to opsize prefixes. 8798 8801 */ 8799 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(p IemCpu, iSegReg, GCPtrMem, cbLimit);8802 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit); 8800 8803 if (rcStrict == VINF_SUCCESS) 8801 8804 { 8802 if (p IemCpu->enmCpuMode == IEMMODE_16BIT)8803 rcStrict = iemMemStoreDataU32(p IemCpu, iSegReg, GCPtrMem + 2,8804 IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_2868805 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT) 8806 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, 8807 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286 8805 8808 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase); 8806 else if (p IemCpu->enmCpuMode == IEMMODE_32BIT)8807 rcStrict = iemMemStoreDataU32(p IemCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);8809 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT) 8810 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase); 8808 8811 else 8809 rcStrict = iemMemStoreDataU64(p IemCpu, iSegReg, GCPtrMem + 2, GCPtrBase);8812 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase); 8810 8813 } 8811 8814 return rcStrict; … … 8817 8820 * 8818 8821 * @returns Strict VBox status code. 8819 * @param p IemCpu The IEM per CPU data.8822 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8820 8823 * @param u16Value The value to push. 8821 8824 */ 8822 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(P IEMCPU pIemCpu, uint16_t u16Value)8825 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value) 8823 8826 { 8824 8827 /* Increment the stack pointer. */ 8825 8828 uint64_t uNewRsp; 8826 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);8827 RTGCPTR GCPtrTop = iemRegGetRspForPush(p IemCpu, pCtx, 2, &uNewRsp);8829 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 8830 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp); 8828 8831 8829 8832 /* Write the word the lazy way. 
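   Remark: the stack push helpers all follow the same protocol:
   iemRegGetRspForPush computes the new top of stack and the candidate RSP
   without touching pCtx->rsp, the value is written through an SS-relative
   IEM_ACCESS_STACK_W mapping, and the new RSP is only stored once the commit
   succeeded (compare iemMemStackPushCommitSpecial further down).  Sketch of
   the shape, with the mapping/commit step elided:

       uint64_t uNewRsp;
       RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
       ... map SS:GCPtrTop for IEM_ACCESS_STACK_W, store the value, commit and unmap ...
       if (rc == VINF_SUCCESS)
           pCtx->rsp = uNewRsp;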
*/ 8830 8833 uint16_t *pu16Dst; 8831 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);8834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 8832 8835 if (rc == VINF_SUCCESS) 8833 8836 { 8834 8837 *pu16Dst = u16Value; 8835 rc = iemMemCommitAndUnmap(p IemCpu, pu16Dst, IEM_ACCESS_STACK_W);8838 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W); 8836 8839 } 8837 8840 … … 8848 8851 * 8849 8852 * @returns Strict VBox status code. 8850 * @param p IemCpu The IEM per CPU data.8853 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8851 8854 * @param u32Value The value to push. 8852 8855 */ 8853 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(P IEMCPU pIemCpu, uint32_t u32Value)8856 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value) 8854 8857 { 8855 8858 /* Increment the stack pointer. */ 8856 8859 uint64_t uNewRsp; 8857 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);8858 RTGCPTR GCPtrTop = iemRegGetRspForPush(p IemCpu, pCtx, 4, &uNewRsp);8860 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 8861 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp); 8859 8862 8860 8863 /* Write the dword the lazy way. */ 8861 8864 uint32_t *pu32Dst; 8862 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);8865 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 8863 8866 if (rc == VINF_SUCCESS) 8864 8867 { 8865 8868 *pu32Dst = u32Value; 8866 rc = iemMemCommitAndUnmap(p IemCpu, pu32Dst, IEM_ACCESS_STACK_W);8869 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W); 8867 8870 } 8868 8871 … … 8879 8882 * 8880 8883 * @returns Strict VBox status code. 8881 * @param p IemCpu The IEM per CPU data.8884 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8882 8885 * @param u32Value The value to push. 8883 8886 */ 8884 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(P IEMCPU pIemCpu, uint32_t u32Value)8887 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value) 8885 8888 { 8886 8889 /* Increment the stack pointer. */ 8887 8890 uint64_t uNewRsp; 8888 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);8889 RTGCPTR GCPtrTop = iemRegGetRspForPush(p IemCpu, pCtx, 4, &uNewRsp);8891 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 8892 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp); 8890 8893 8891 8894 VBOXSTRICTRC rc; 8892 if (IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))8895 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 8893 8896 { 8894 8897 /* The recompiler writes a full dword. */ 8895 8898 uint32_t *pu32Dst; 8896 rc = iemMemMap(p IemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);8899 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 8897 8900 if (rc == VINF_SUCCESS) 8898 8901 { 8899 8902 *pu32Dst = u32Value; 8900 rc = iemMemCommitAndUnmap(p IemCpu, pu32Dst, IEM_ACCESS_STACK_W);8903 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W); 8901 8904 } 8902 8905 } … … 8914 8917 * ancient hardware when it actually did change. 
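 *
 * In other words, outside the full-verification path only the selector word is
 * written: the slot is mapped with sizeof(uint32_t) and IEM_ACCESS_STACK_RW,
 * so the untouched high word of the dword slot keeps its previous guest value,
 * and the store itself is 16-bit:
 *
 *     *pu16Dst = (uint16_t)u32Value;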
*/ 8915 8918 uint16_t *pu16Dst; 8916 rc = iemMemMap(p IemCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);8919 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW); 8917 8920 if (rc == VINF_SUCCESS) 8918 8921 { 8919 8922 *pu16Dst = (uint16_t)u32Value; 8920 rc = iemMemCommitAndUnmap(p IemCpu, pu16Dst, IEM_ACCESS_STACK_RW);8923 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW); 8921 8924 } 8922 8925 } … … 8934 8937 * 8935 8938 * @returns Strict VBox status code. 8936 * @param p IemCpu The IEM per CPU data.8939 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8937 8940 * @param u64Value The value to push. 8938 8941 */ 8939 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(P IEMCPU pIemCpu, uint64_t u64Value)8942 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value) 8940 8943 { 8941 8944 /* Increment the stack pointer. */ 8942 8945 uint64_t uNewRsp; 8943 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);8944 RTGCPTR GCPtrTop = iemRegGetRspForPush(p IemCpu, pCtx, 8, &uNewRsp);8946 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 8947 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp); 8945 8948 8946 8949 /* Write the word the lazy way. */ 8947 8950 uint64_t *pu64Dst; 8948 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);8951 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 8949 8952 if (rc == VINF_SUCCESS) 8950 8953 { 8951 8954 *pu64Dst = u64Value; 8952 rc = iemMemCommitAndUnmap(p IemCpu, pu64Dst, IEM_ACCESS_STACK_W);8955 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W); 8953 8956 } 8954 8957 … … 8965 8968 * 8966 8969 * @returns Strict VBox status code. 8967 * @param p IemCpu The IEM per CPU data.8970 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8968 8971 * @param pu16Value Where to store the popped value. 8969 8972 */ 8970 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(P IEMCPU pIemCpu, uint16_t *pu16Value)8973 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value) 8971 8974 { 8972 8975 /* Increment the stack pointer. */ 8973 8976 uint64_t uNewRsp; 8974 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);8975 RTGCPTR GCPtrTop = iemRegGetRspForPop(p IemCpu, pCtx, 2, &uNewRsp);8977 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 8978 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp); 8976 8979 8977 8980 /* Write the word the lazy way. */ 8978 8981 uint16_t const *pu16Src; 8979 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);8982 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 8980 8983 if (rc == VINF_SUCCESS) 8981 8984 { 8982 8985 *pu16Value = *pu16Src; 8983 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);8986 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R); 8984 8987 8985 8988 /* Commit the new RSP value. */ … … 8996 8999 * 8997 9000 * @returns Strict VBox status code. 8998 * @param p IemCpu The IEM per CPU data.9001 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8999 9002 * @param pu32Value Where to store the popped value. 
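 *
 * Remarks: the pop helpers mirror the pushes: iemRegGetRspForPop yields the
 * current top of stack plus the incremented RSP candidate, the value is read
 * through an SS-relative IEM_ACCESS_STACK_R mapping, and the new RSP is only
 * committed once the unmap succeeded.  Rough caller-side sketch (u32Popped is
 * just an illustrative local):
 *
 *     uint32_t u32Popped;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU32(pVCpu, &u32Popped);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 * On success pCtx->rsp has already been advanced by the helper.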
9000 9003 */ 9001 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(P IEMCPU pIemCpu, uint32_t *pu32Value)9004 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value) 9002 9005 { 9003 9006 /* Increment the stack pointer. */ 9004 9007 uint64_t uNewRsp; 9005 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9006 RTGCPTR GCPtrTop = iemRegGetRspForPop(p IemCpu, pCtx, 4, &uNewRsp);9008 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9009 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp); 9007 9010 9008 9011 /* Write the word the lazy way. */ 9009 9012 uint32_t const *pu32Src; 9010 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9011 9014 if (rc == VINF_SUCCESS) 9012 9015 { 9013 9016 *pu32Value = *pu32Src; 9014 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);9017 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R); 9015 9018 9016 9019 /* Commit the new RSP value. */ … … 9027 9030 * 9028 9031 * @returns Strict VBox status code. 9029 * @param p IemCpu The IEM per CPU data.9032 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9030 9033 * @param pu64Value Where to store the popped value. 9031 9034 */ 9032 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(P IEMCPU pIemCpu, uint64_t *pu64Value)9035 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value) 9033 9036 { 9034 9037 /* Increment the stack pointer. */ 9035 9038 uint64_t uNewRsp; 9036 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9037 RTGCPTR GCPtrTop = iemRegGetRspForPop(p IemCpu, pCtx, 8, &uNewRsp);9039 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9040 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp); 9038 9041 9039 9042 /* Write the word the lazy way. */ 9040 9043 uint64_t const *pu64Src; 9041 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9042 9045 if (rc == VINF_SUCCESS) 9043 9046 { 9044 9047 *pu64Value = *pu64Src; 9045 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);9048 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R); 9046 9049 9047 9050 /* Commit the new RSP value. */ … … 9058 9061 * 9059 9062 * @returns Strict VBox status code. 9060 * @param p IemCpu The IEM per CPU data.9063 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9061 9064 * @param u16Value The value to push. 9062 9065 * @param pTmpRsp Pointer to the temporary stack pointer. 9063 9066 */ 9064 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(P IEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)9067 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) 9065 9068 { 9066 9069 /* Increment the stack pointer. */ 9067 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9070 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9068 9071 RTUINT64U NewRsp = *pTmpRsp; 9069 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(p IemCpu, pCtx, &NewRsp, 2);9072 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2); 9070 9073 9071 9074 /* Write the word the lazy way. 
*/ 9072 9075 uint16_t *pu16Dst; 9073 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);9076 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 9074 9077 if (rc == VINF_SUCCESS) 9075 9078 { 9076 9079 *pu16Dst = u16Value; 9077 rc = iemMemCommitAndUnmap(p IemCpu, pu16Dst, IEM_ACCESS_STACK_W);9080 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W); 9078 9081 } 9079 9082 … … 9090 9093 * 9091 9094 * @returns Strict VBox status code. 9092 * @param p IemCpu The IEM per CPU data.9095 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9093 9096 * @param u32Value The value to push. 9094 9097 * @param pTmpRsp Pointer to the temporary stack pointer. 9095 9098 */ 9096 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(P IEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)9099 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) 9097 9100 { 9098 9101 /* Increment the stack pointer. */ 9099 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9102 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9100 9103 RTUINT64U NewRsp = *pTmpRsp; 9101 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(p IemCpu, pCtx, &NewRsp, 4);9104 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4); 9102 9105 9103 9106 /* Write the word the lazy way. */ 9104 9107 uint32_t *pu32Dst; 9105 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);9108 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 9106 9109 if (rc == VINF_SUCCESS) 9107 9110 { 9108 9111 *pu32Dst = u32Value; 9109 rc = iemMemCommitAndUnmap(p IemCpu, pu32Dst, IEM_ACCESS_STACK_W);9112 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W); 9110 9113 } 9111 9114 … … 9122 9125 * 9123 9126 * @returns Strict VBox status code. 9124 * @param p IemCpu The IEM per CPU data.9127 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9125 9128 * @param u64Value The value to push. 9126 9129 * @param pTmpRsp Pointer to the temporary stack pointer. 9127 9130 */ 9128 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(P IEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)9131 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) 9129 9132 { 9130 9133 /* Increment the stack pointer. */ 9131 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9134 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9132 9135 RTUINT64U NewRsp = *pTmpRsp; 9133 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(p IemCpu, pCtx, &NewRsp, 8);9136 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8); 9134 9137 9135 9138 /* Write the word the lazy way. */ 9136 9139 uint64_t *pu64Dst; 9137 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);9140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 9138 9141 if (rc == VINF_SUCCESS) 9139 9142 { 9140 9143 *pu64Dst = u64Value; 9141 rc = iemMemCommitAndUnmap(p IemCpu, pu64Dst, IEM_ACCESS_STACK_W);9144 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W); 9142 9145 } 9143 9146 … … 9154 9157 * 9155 9158 * @returns Strict VBox status code. 
9156 * @param p IemCpu The IEM per CPU data.9159 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9157 9160 * @param pu16Value Where to store the popped value. 9158 9161 * @param pTmpRsp Pointer to the temporary stack pointer. 9159 9162 */ 9160 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(P IEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)9163 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) 9161 9164 { 9162 9165 /* Increment the stack pointer. */ 9163 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9166 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9164 9167 RTUINT64U NewRsp = *pTmpRsp; 9165 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(p IemCpu, pCtx, &NewRsp, 2);9168 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2); 9166 9169 9167 9170 /* Write the word the lazy way. */ 9168 9171 uint16_t const *pu16Src; 9169 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9170 9173 if (rc == VINF_SUCCESS) 9171 9174 { 9172 9175 *pu16Value = *pu16Src; 9173 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);9176 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R); 9174 9177 9175 9178 /* Commit the new RSP value. */ … … 9186 9189 * 9187 9190 * @returns Strict VBox status code. 9188 * @param p IemCpu The IEM per CPU data.9191 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9189 9192 * @param pu32Value Where to store the popped value. 9190 9193 * @param pTmpRsp Pointer to the temporary stack pointer. 9191 9194 */ 9192 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(P IEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)9195 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) 9193 9196 { 9194 9197 /* Increment the stack pointer. */ 9195 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9198 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9196 9199 RTUINT64U NewRsp = *pTmpRsp; 9197 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(p IemCpu, pCtx, &NewRsp, 4);9200 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4); 9198 9201 9199 9202 /* Write the word the lazy way. */ 9200 9203 uint32_t const *pu32Src; 9201 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9202 9205 if (rc == VINF_SUCCESS) 9203 9206 { 9204 9207 *pu32Value = *pu32Src; 9205 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);9208 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R); 9206 9209 9207 9210 /* Commit the new RSP value. */ … … 9218 9221 * 9219 9222 * @returns Strict VBox status code. 9220 * @param p IemCpu The IEM per CPU data.9223 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9221 9224 * @param pu64Value Where to store the popped value. 9222 9225 * @param pTmpRsp Pointer to the temporary stack pointer. 9223 9226 */ 9224 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(P IEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)9227 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) 9225 9228 { 9226 9229 /* Increment the stack pointer. 
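   Remark: the *Ex push/pop variants work against a caller-owned RTUINT64U
   temporary instead of committing pCtx->rsp themselves, so a caller can chain
   several stack accesses and publish the final stack pointer in one go.  A
   hedged usage sketch (TmpRsp, uSel and uRip are illustrative locals; it
   assumes the helpers advance *pTmpRsp on success, as their commit steps
   indicate):

       RTUINT64U TmpRsp;
       TmpRsp.u = pCtx->rsp;
       VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uSel, &TmpRsp);
       if (rcStrict == VINF_SUCCESS)
           rcStrict = iemMemStackPushU64Ex(pVCpu, uRip, &TmpRsp);
       if (rcStrict == VINF_SUCCESS)
           pCtx->rsp = TmpRsp.u;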
*/ 9227 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9230 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9228 9231 RTUINT64U NewRsp = *pTmpRsp; 9229 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(p IemCpu, pCtx, &NewRsp, 8);9232 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8); 9230 9233 9231 9234 /* Write the word the lazy way. */ 9232 9235 uint64_t const *pu64Src; 9233 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9236 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9234 9237 if (rcStrict == VINF_SUCCESS) 9235 9238 { 9236 9239 *pu64Value = *pu64Src; 9237 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);9240 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R); 9238 9241 9239 9242 /* Commit the new RSP value. */ … … 9252 9255 * 9253 9256 * @returns Strict VBox status code. 9254 * @param p IemCpu The IEM per CPU data.9257 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9255 9258 * @param cbMem The number of bytes to push onto the stack. 9256 9259 * @param ppvMem Where to return the pointer to the stack memory. … … 9263 9266 * iemMemStackPushCommitSpecial(). 9264 9267 */ 9265 IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(P IEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)9268 IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) 9266 9269 { 9267 9270 Assert(cbMem < UINT8_MAX); 9268 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9269 RTGCPTR GCPtrTop = iemRegGetRspForPush(p IemCpu, pCtx, (uint8_t)cbMem, puNewRsp);9270 return iemMemMap(p IemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);9271 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9272 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp); 9273 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 9271 9274 } 9272 9275 … … 9278 9281 * 9279 9282 * @returns Strict VBox status code. 9280 * @param p IemCpu The IEM per CPU data.9283 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9281 9284 * @param pvMem The pointer returned by 9282 9285 * iemMemStackPushBeginSpecial(). … … 9284 9287 * iemMemStackPushBeginSpecial(). 9285 9288 */ 9286 IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(P IEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)9287 { 9288 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(p IemCpu, pvMem, IEM_ACCESS_STACK_W);9289 IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp) 9290 { 9291 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W); 9289 9292 if (rcStrict == VINF_SUCCESS) 9290 p IemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;9293 pVCpu->iem.s.CTX_SUFF(pCtx)->rsp = uNewRsp; 9291 9294 return rcStrict; 9292 9295 } … … 9299 9302 * 9300 9303 * @returns Strict VBox status code. 9301 * @param p IemCpu The IEM per CPU data.9304 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9302 9305 * @param cbMem The number of bytes to push onto the stack. 9303 9306 * @param ppvMem Where to return the pointer to the stack memory. … … 9307 9310 * manually if iemMemStackPopDoneSpecial() is used. 
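 *
 * Remarks: together with iemMemStackPopContinueSpecial(),
 * iemMemStackPopCommitSpecial() and iemMemStackPopDoneSpecial() this forms a
 * small protocol for multi-byte frame accesses: Begin maps cbMem at the
 * current stack top and hands back the pointer plus the would-be RSP, Commit
 * unmaps and stores that RSP, while Done merely unmaps for callers that update
 * RSP themselves.  Rough usage sketch (pvFrame is an illustrative local):
 *
 *     void const *pvFrame;
 *     uint64_t    uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         ... read the popped bytes out of pvFrame ...
 *         rcStrict = iemMemStackPopCommitSpecial(pVCpu, pvFrame, uNewRsp);
 *     }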
9308 9311 */ 9309 IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(P IEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)9312 IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) 9310 9313 { 9311 9314 Assert(cbMem < UINT8_MAX); 9312 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9313 RTGCPTR GCPtrTop = iemRegGetRspForPop(p IemCpu, pCtx, (uint8_t)cbMem, puNewRsp);9314 return iemMemMap(p IemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9315 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9316 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp); 9317 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9315 9318 } 9316 9319 … … 9322 9325 * 9323 9326 * @returns Strict VBox status code. 9324 * @param p IemCpu The IEM per CPU data.9327 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9325 9328 * @param cbMem The number of bytes to push onto the stack. 9326 9329 * @param ppvMem Where to return the pointer to the stack memory. … … 9330 9333 * manually if iemMemStackPopDoneSpecial() is used. 9331 9334 */ 9332 IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(P IEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)9335 IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) 9333 9336 { 9334 9337 Assert(cbMem < UINT8_MAX); 9335 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9338 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9336 9339 RTUINT64U NewRsp; 9337 9340 NewRsp.u = *puNewRsp; 9338 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(p IemCpu, pCtx, &NewRsp, 8);9341 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8); 9339 9342 *puNewRsp = NewRsp.u; 9340 return iemMemMap(p IemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);9343 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 9341 9344 } 9342 9345 … … 9348 9351 * 9349 9352 * @returns Strict VBox status code. 9350 * @param p IemCpu The IEM per CPU data.9353 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9351 9354 * @param pvMem The pointer returned by 9352 9355 * iemMemStackPopBeginSpecial(). … … 9354 9357 * iemMemStackPopBeginSpecial(). 9355 9358 */ 9356 IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(P IEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)9357 { 9358 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);9359 IEM_STATIC VBOXSTRICTRC iemMemStackPopCommitSpecial(PVMCPU pVCpu, void const *pvMem, uint64_t uNewRsp) 9360 { 9361 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R); 9359 9362 if (rcStrict == VINF_SUCCESS) 9360 p IemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;9363 pVCpu->iem.s.CTX_SUFF(pCtx)->rsp = uNewRsp; 9361 9364 return rcStrict; 9362 9365 } … … 9370 9373 * 9371 9374 * @returns Strict VBox status code. 9372 * @param p IemCpu The IEM per CPU data.9375 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9373 9376 * @param pvMem The pointer returned by 9374 9377 * iemMemStackPopBeginSpecial() or 9375 9378 * iemMemStackPopContinueSpecial(). 
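/* Illustrative-only sketch of the Begin/Continue/Done pop flow above: Begin and
 * Continue both advance a caller-held RSP value, and Done merely releases the
 * mapping, leaving it to the caller to decide whether the new RSP is ever kept.
 * Hypothetical names, simulated memory. */
#include <stdint.h>
#include <stdio.h>

static uint8_t  g_abStack[32] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
static uint64_t g_uRsp = 0;

static const void *popBegin(size_t cbMem, uint64_t *puNewRsp)
{
    const void *pvMem = &g_abStack[g_uRsp];
    *puNewRsp = g_uRsp + cbMem;            /* first chunk, relative to the real RSP  */
    return pvMem;
}

static const void *popContinue(size_t cbMem, uint64_t *puNewRsp)
{
    const void *pvMem = &g_abStack[*puNewRsp];
    *puNewRsp += cbMem;                    /* later chunks continue from the copy    */
    return pvMem;
}

static void popDone(const void *pvMem)     /* only releases the mapping              */
{
    (void)pvMem;
}

int main(void)
{
    uint64_t       uNewRsp;
    const uint8_t *pb1 = popBegin(4, &uNewRsp);     /* e.g. first part of a frame    */
    const uint8_t *pb2 = popContinue(4, &uNewRsp);  /* e.g. second part of the frame */
    printf("%#x %#x, would-be rsp=%llu\n", pb1[0], pb2[0], (unsigned long long)uNewRsp);
    popDone(pb2);
    popDone(pb1);
    /* The simulated RSP (g_uRsp) is deliberately left untouched here. */
    return 0;
}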
9376 9379 */ 9377 IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(P IEMCPU pIemCpu, void const *pvMem)9378 { 9379 return iemMemCommitAndUnmap(p IemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);9380 IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem) 9381 { 9382 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R); 9380 9383 } 9381 9384 … … 9385 9388 * 9386 9389 * @returns Strict VBox status code. 9387 * @param p IemCpu The IEM per CPU data.9390 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9388 9391 * @param pbDst Where to return the byte. 9389 9392 * @param iSegReg The index of the segment register to use for … … 9391 9394 * @param GCPtrMem The address of the guest memory. 9392 9395 */ 9393 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(P IEMCPU pIemCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)9396 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) 9394 9397 { 9395 9398 /* The lazy approach for now... */ 9396 9399 uint8_t const *pbSrc; 9397 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);9400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); 9398 9401 if (rc == VINF_SUCCESS) 9399 9402 { 9400 9403 *pbDst = *pbSrc; 9401 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);9404 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R); 9402 9405 } 9403 9406 return rc; … … 9409 9412 * 9410 9413 * @returns Strict VBox status code. 9411 * @param p IemCpu The IEM per CPU data.9414 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9412 9415 * @param pu16Dst Where to return the word. 9413 9416 * @param iSegReg The index of the segment register to use for … … 9415 9418 * @param GCPtrMem The address of the guest memory. 9416 9419 */ 9417 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(P IEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)9420 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 9418 9421 { 9419 9422 /* The lazy approach for now... */ 9420 9423 uint16_t const *pu16Src; 9421 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);9424 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); 9422 9425 if (rc == VINF_SUCCESS) 9423 9426 { 9424 9427 *pu16Dst = *pu16Src; 9425 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);9428 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R); 9426 9429 } 9427 9430 return rc; … … 9433 9436 * 9434 9437 * @returns Strict VBox status code. 9435 * @param p IemCpu The IEM per CPU data.9438 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9436 9439 * @param pu32Dst Where to return the dword. 9437 9440 * @param iSegReg The index of the segment register to use for … … 9439 9442 * @param GCPtrMem The address of the guest memory. 9440 9443 */ 9441 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(P IEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)9444 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 9442 9445 { 9443 9446 /* The lazy approach for now... 
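/* Rough sketch (not the real API) of the "lazy" fetch shape shared by the
 * iemMemFetchSysU8/U16/U32/U64 helpers: map the guest bytes, copy out one scalar,
 * then commit/unmap.  A flat byte array stands in for guest memory here. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t g_abGuestMem[16] = { 0xEF, 0xBE, 0xAD, 0xDE, 0x78, 0x56, 0x34, 0x12 };

/* One helper per width in the real code; a size parameter is enough for the sketch. */
static int fetchSys(void *pvDst, size_t cbValue, uint64_t GCPtrMem)
{
    if (GCPtrMem + cbValue > sizeof(g_abGuestMem))
        return -1;                                    /* the mapping would fault  */
    memcpy(pvDst, &g_abGuestMem[GCPtrMem], cbValue);  /* read via the "mapping"   */
    return 0;                                         /* unmap is a no-op here    */
}

int main(void)
{
    uint32_t u32 = 0;
    uint64_t u64 = 0;
    if (fetchSys(&u32, sizeof(u32), 0) == 0 && fetchSys(&u64, sizeof(u64), 0) == 0)
        printf("u32=%#x u64=%#llx\n", u32, (unsigned long long)u64);
    return 0;
}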
*/ 9444 9447 uint32_t const *pu32Src; 9445 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);9448 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); 9446 9449 if (rc == VINF_SUCCESS) 9447 9450 { 9448 9451 *pu32Dst = *pu32Src; 9449 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);9452 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R); 9450 9453 } 9451 9454 return rc; … … 9457 9460 * 9458 9461 * @returns Strict VBox status code. 9459 * @param p IemCpu The IEM per CPU data.9462 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9460 9463 * @param pu64Dst Where to return the qword. 9461 9464 * @param iSegReg The index of the segment register to use for … … 9463 9466 * @param GCPtrMem The address of the guest memory. 9464 9467 */ 9465 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(P IEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)9468 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) 9466 9469 { 9467 9470 /* The lazy approach for now... */ 9468 9471 uint64_t const *pu64Src; 9469 VBOXSTRICTRC rc = iemMemMap(p IemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);9472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R); 9470 9473 if (rc == VINF_SUCCESS) 9471 9474 { 9472 9475 *pu64Dst = *pu64Src; 9473 rc = iemMemCommitAndUnmap(p IemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);9476 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R); 9474 9477 } 9475 9478 return rc; … … 9481 9484 * 9482 9485 * @returns Strict VBox status code. 9483 * @param p IemCpu The IEM per CPU.9486 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9484 9487 * @param pDesc Where to return the descriptor table entry. 9485 9488 * @param uSel The selector which table entry to fetch. … … 9488 9491 */ 9489 9492 IEM_STATIC VBOXSTRICTRC 9490 iemMemFetchSelDescWithErr(P IEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)9493 iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode) 9491 9494 { 9492 9495 AssertPtr(pDesc); 9493 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9496 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9494 9497 9495 9498 /** @todo did the 286 require all 8 bytes to be accessible? */ … … 9505 9508 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n", 9506 9509 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel)); 9507 return iemRaiseXcptOrInt(p IemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,9510 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 9508 9511 uErrorCode, 0); 9509 9512 } … … 9517 9520 { 9518 9521 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt)); 9519 return iemRaiseXcptOrInt(p IemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,9522 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 9520 9523 uErrorCode, 0); 9521 9524 } … … 9527 9530 * required. 
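/* Hedged sketch of the bounds check performed before the descriptor fetch above:
 * the selector's table-indicator bit picks GDT or LDT, and index*8 + 7 must lie
 * within that table's limit or a fault is raised.  Constants are simplified and
 * the function names are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

#define SEL_TI_LDT     0x0004      /* table indicator bit                  */
#define SEL_INDEX_MASK 0xFFF8      /* selector with TI/RPL bits stripped   */

typedef struct { uint64_t uBase; uint32_t cbLimit; } DTR;

/* Returns the linear address of the descriptor, or 0 on a #GP-style error. */
static uint64_t descAddr(uint16_t uSel, const DTR *pGdtr, const DTR *pLdtr)
{
    const DTR *pTable  = (uSel & SEL_TI_LDT) ? pLdtr : pGdtr;
    uint32_t   offDesc = uSel & SEL_INDEX_MASK;
    if (offDesc + 7 > pTable->cbLimit)
        return 0;                          /* out of bounds -> raise #GP(sel) */
    return pTable->uBase + offDesc;
}

int main(void)
{
    DTR Gdtr = { 0x1000, 0x7F };           /* room for 16 descriptors */
    DTR Ldtr = { 0x2000, 0x17 };           /* room for 3 descriptors  */
    printf("%#llx\n", (unsigned long long)descAddr(0x0010, &Gdtr, &Ldtr)); /* GDT entry 2: ok           */
    printf("%#llx\n", (unsigned long long)descAddr(0x0024, &Gdtr, &Ldtr)); /* LDT entry 4: out of range */
    return 0;
}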
9528 9531 */ 9529 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(p IemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));9532 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK)); 9530 9533 if (rcStrict == VINF_SUCCESS) 9531 9534 { 9532 if ( !IEM_IS_LONG_MODE(p IemCpu)9535 if ( !IEM_IS_LONG_MODE(pVCpu) 9533 9536 || pDesc->Legacy.Gen.u1DescType) 9534 9537 pDesc->Long.au64[1] = 0; 9535 9538 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt)) 9536 rcStrict = iemMemFetchSysU64(p IemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);9539 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1); 9537 9540 else 9538 9541 { 9539 9542 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel)); 9540 9543 /** @todo is this the right exception? */ 9541 return iemRaiseXcptOrInt(p IemCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);9544 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0); 9542 9545 } 9543 9546 } … … 9550 9553 * 9551 9554 * @returns Strict VBox status code. 9552 * @param p IemCpu The IEM per CPU.9555 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9553 9556 * @param pDesc Where to return the descriptor table entry. 9554 9557 * @param uSel The selector which table entry to fetch. 9555 9558 * @param uXcpt The exception to raise on table lookup error. 9556 9559 */ 9557 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(P IEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)9558 { 9559 return iemMemFetchSelDescWithErr(p IemCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);9560 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) 9561 { 9562 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL); 9560 9563 } 9561 9564 … … 9586 9589 * 9587 9590 * @returns Strict VBox status code. 9588 * @param p IemCpu The IEM per CPU.9591 * @param pVCpu The cross context virtual CPU structure of the calling thread. 9589 9592 * @param uSel The selector. 9590 9593 */ 9591 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(P IEMCPU pIemCpu, uint16_t uSel)9592 { 9593 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);9594 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel) 9595 { 9596 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 9594 9597 9595 9598 /* … … 9612 9615 /* The normal case, map the 32-bit bits around the accessed bit (40). */ 9613 9616 GCPtr += 2 + 2; 9614 rcStrict = iemMemMap(p IemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);9617 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW); 9615 9618 if (rcStrict != VINF_SUCCESS) 9616 9619 return rcStrict; … … 9620 9623 { 9621 9624 /* The misaligned GDT/LDT case, map the whole thing. 
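/* Simplified illustration (assumptions only, not the real code) of what "marking a
 * descriptor accessed" amounts to: setting bit 40 of the 8-byte descriptor, i.e.
 * bit 8 of the dword at offset 4, which is why the normal case above maps just the
 * 32 bits around the accessed bit. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_TYPE_ACCESSED_BIT  8   /* within the high dword of the descriptor */

int main(void)
{
    uint8_t abDesc[8] = { 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00 }; /* a code segment */

    uint32_t u32Hi;
    memcpy(&u32Hi, &abDesc[4], sizeof(u32Hi));       /* map the dword at offset 4      */
    u32Hi |= (uint32_t)1 << DESC_TYPE_ACCESSED_BIT;  /* set the accessed bit (bit 40)  */
    memcpy(&abDesc[4], &u32Hi, sizeof(u32Hi));       /* commit and unmap               */

    printf("type byte is now %#x\n", abDesc[5]);     /* 0x9A -> 0x9B on little-endian hosts */
    return 0;
}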
*/ 9622 rcStrict = iemMemMap(p IemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);9625 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW); 9623 9626 if (rcStrict != VINF_SUCCESS) 9624 9627 return rcStrict; … … 9632 9635 } 9633 9636 9634 return iemMemCommitAndUnmap(p IemCpu, (void *)pu32, IEM_ACCESS_SYS_RW);9637 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW); 9635 9638 } 9636 9639 … … 9667 9670 9668 9671 9669 #define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(p IemCpu)9670 #define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(p IemCpu, a_i8))9671 #define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(p IemCpu, a_i16))9672 #define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(p IemCpu, a_i32))9673 #define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((p IemCpu), (a_u16NewIP)))9674 #define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((p IemCpu), (a_u32NewIP)))9675 #define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((p IemCpu), (a_u64NewIP)))9676 #define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(p IemCpu)9672 #define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu) 9673 #define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8)) 9674 #define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16)) 9675 #define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32)) 9676 #define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP))) 9677 #define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP))) 9678 #define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP))) 9679 #define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu) 9677 9680 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \ 9678 9681 do { \ 9679 if ((p IemCpu)->CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \9680 return iemRaiseDeviceNotAvailable(p IemCpu); \9682 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \ 9683 return iemRaiseDeviceNotAvailable(pVCpu); \ 9681 9684 } while (0) 9682 9685 #define IEM_MC_MAYBE_RAISE_FPU_XCPT() \ 9683 9686 do { \ 9684 if ((p IemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \9685 return iemRaiseMathFault(p IemCpu); \9687 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \ 9688 return iemRaiseMathFault(pVCpu); \ 9686 9689 } while (0) 9687 9690 #define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \ 9688 9691 do { \ 9689 if ( (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \9690 || !(p IemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \9691 || !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2) \9692 return iemRaiseUndefinedOpcode(p IemCpu); \9693 if (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \9694 return iemRaiseDeviceNotAvailable(p IemCpu); \9692 if ( (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ 9693 || !(pVCpu->iem.s.CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \ 9694 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \ 9695 return iemRaiseUndefinedOpcode(pVCpu); \ 9696 if (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ 9697 return iemRaiseDeviceNotAvailable(pVCpu); \ 9695 9698 } while (0) 9696 9699 #define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \ 9697 9700 do { \ 9698 if ( (p 
IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \9699 || !(p IemCpu->CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \9700 || !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse) \9701 return iemRaiseUndefinedOpcode(p IemCpu); \9702 if (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \9703 return iemRaiseDeviceNotAvailable(p IemCpu); \9701 if ( (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ 9702 || !(pVCpu->iem.s.CTX_SUFF(pCtx)->cr4 & X86_CR4_OSFXSR) \ 9703 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \ 9704 return iemRaiseUndefinedOpcode(pVCpu); \ 9705 if (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ 9706 return iemRaiseDeviceNotAvailable(pVCpu); \ 9704 9707 } while (0) 9705 9708 #define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \ 9706 9709 do { \ 9707 if ( ((p IemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \9708 || !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMmx) \9709 return iemRaiseUndefinedOpcode(p IemCpu); \9710 if (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \9711 return iemRaiseDeviceNotAvailable(p IemCpu); \9710 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ 9711 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \ 9712 return iemRaiseUndefinedOpcode(pVCpu); \ 9713 if (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ 9714 return iemRaiseDeviceNotAvailable(pVCpu); \ 9712 9715 } while (0) 9713 9716 #define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \ 9714 9717 do { \ 9715 if ( ((p IemCpu)->CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \9716 || ( !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse \9717 && !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fAmdMmxExts) ) \9718 return iemRaiseUndefinedOpcode(p IemCpu); \9719 if (p IemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \9720 return iemRaiseDeviceNotAvailable(p IemCpu); \9718 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \ 9719 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \ 9720 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \ 9721 return iemRaiseUndefinedOpcode(pVCpu); \ 9722 if (pVCpu->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_TS) \ 9723 return iemRaiseDeviceNotAvailable(pVCpu); \ 9721 9724 } while (0) 9722 9725 #define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \ 9723 9726 do { \ 9724 if (p IemCpu->uCpl != 0) \9725 return iemRaiseGeneralProtectionFault0(p IemCpu); \9727 if (pVCpu->iem.s.uCpl != 0) \ 9728 return iemRaiseGeneralProtectionFault0(pVCpu); \ 9726 9729 } while (0) 9727 9730 … … 9737 9740 uint32_t *a_pName = &a_Name 9738 9741 #define IEM_MC_COMMIT_EFLAGS(a_EFlags) \ 9739 do { (p IemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)9742 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0) 9740 9743 9741 9744 #define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst) 9742 9745 #define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN 9743 9746 9744 #define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(p IemCpu, (a_iGReg))9745 #define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(p IemCpu, (a_iGReg))9746 #define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(p IemCpu, (a_iGReg))9747 #define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(p IemCpu, (a_iGReg))9748 #define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(p IemCpu, (a_iGReg))9749 #define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(p IemCpu, (a_iGReg))9750 #define 
IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(p IemCpu, (a_iGReg))9751 #define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(p IemCpu, (a_iGReg))9752 #define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(p IemCpu, (a_iGReg))9753 #define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(p IemCpu, (a_iGReg))9754 #define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(p IemCpu, (a_iGReg))9755 #define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(p IemCpu, (a_iGReg))9756 #define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(p IemCpu, (a_iGReg))9757 #define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(p IemCpu, (a_iGReg))9758 #define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(p IemCpu, (a_iGReg))9759 #define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(p IemCpu, (a_iGReg))9747 #define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg)) 9748 #define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg)) 9749 #define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg)) 9750 #define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg)) 9751 #define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg)) 9752 #define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg)) 9753 #define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg)) 9754 #define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg)) 9755 #define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg)) 9756 #define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg)) 9757 #define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg)) 9758 #define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg)) 9759 #define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg)) 9760 #define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg)) 9761 #define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg)) 9762 #define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg)) 9760 9763 #define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64 9761 #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(p IemCpu, (a_iSReg))9762 #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(p IemCpu, (a_iSReg))9763 #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(p IemCpu, (a_iSReg))9764 #define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(p IemCpu)->CTX_SUFF(pCtx)->cr09765 #define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(p IemCpu)->CTX_SUFF(pCtx)->cr09766 #define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (p IemCpu)->CTX_SUFF(pCtx)->cr09767 #define IEM_MC_FETCH_LDTR_U16(a_u16Dst) 
(a_u16Dst) = (p IemCpu)->CTX_SUFF(pCtx)->ldtr.Sel9768 #define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (p IemCpu)->CTX_SUFF(pCtx)->ldtr.Sel9769 #define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (p IemCpu)->CTX_SUFF(pCtx)->ldtr.Sel9770 #define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (p IemCpu)->CTX_SUFF(pCtx)->tr.Sel9771 #define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (p IemCpu)->CTX_SUFF(pCtx)->tr.Sel9772 #define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (p IemCpu)->CTX_SUFF(pCtx)->tr.Sel9764 #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)) 9765 #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)) 9766 #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)) 9767 #define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 9768 #define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 9769 #define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 9770 #define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel 9771 #define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel 9772 #define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel 9773 #define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel 9774 #define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel 9775 #define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel 9773 9776 /** @note Not for IOPL or IF testing or modification. */ 9774 #define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (p IemCpu)->CTX_SUFF(pCtx)->eflags.u9775 #define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(p IemCpu)->CTX_SUFF(pCtx)->eflags.u9776 #define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW9777 #define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW9778 9779 #define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(p IemCpu, (a_iGReg)) = (a_u8Value)9780 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(p IemCpu, (a_iGReg)) = (a_u16Value)9781 #define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */9782 #define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) = (a_u64Value)9777 #define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u 9778 #define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u 9779 #define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW 9780 #define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW 9781 9782 #define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8(pVCpu, (a_iGReg)) = (a_u8Value) 9783 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) = (a_u16Value) 9784 #define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. 
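/* Standalone illustration (plain C, no VirtualBox types) of two idioms visible in
 * the macros above: a fetch cast through the signed type ((int8_t)/(int16_t)/(int32_t))
 * sign-extends into the wider destination, while a store written as
 * *(uint64_t *)... = (uint32_t)value zero-extends, clearing the high bits of the
 * 64-bit register. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  u8  = 0x80;
    uint64_t uZx = u8;                 /* _ZX_ flavour: 0x0000000000000080 */
    uint64_t uSx = (int8_t)u8;         /* _SX_ flavour: 0xFFFFFFFFFFFFFF80 */

    uint64_t u64Reg = UINT64_C(0xDEADBEEF00000000);
    u64Reg = (uint32_t)0x12345678;     /* 64-bit store of a 32-bit value: high half cleared */

    printf("zx=%#llx sx=%#llx reg=%#llx\n",
           (unsigned long long)uZx, (unsigned long long)uSx, (unsigned long long)u64Reg);
    return 0;
}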
*/ 9785 #define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) = (a_u64Value) 9783 9786 #define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8 9784 9787 #define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16 9785 9788 #define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32 9786 9789 #define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64 9787 #define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) &= UINT32_MAX9790 #define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) &= UINT32_MAX 9788 9791 #define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0) 9789 9792 #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \ 9790 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)9791 9792 #define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(p IemCpu, (a_iGReg))9793 #define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(p IemCpu, (a_iGReg))9793 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0) 9794 9795 #define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8(pVCpu, (a_iGReg)) 9796 #define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) 9794 9797 /** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit. 9795 9798 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */ 9796 #define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(p IemCpu, (a_iGReg))9797 #define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(p IemCpu, (a_iGReg))9799 #define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pVCpu, (a_iGReg)) 9800 #define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) 9798 9801 /** @note Not for IOPL or IF testing or modification. */ 9799 #define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(p IemCpu)->CTX_SUFF(pCtx)->eflags.u9800 9801 #define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(p IemCpu, (a_iGReg)) += (a_u8Value)9802 #define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(p IemCpu, (a_iGReg)) += (a_u16Value)9802 #define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u 9803 9804 #define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pVCpu, (a_iGReg)) += (a_u8Value) 9805 #define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) += (a_u16Value) 9803 9806 #define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \ 9804 9807 do { \ 9805 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(p IemCpu, (a_iGReg)); \9808 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pVCpu, (a_iGReg)); \ 9806 9809 *pu32Reg += (a_u32Value); \ 9807 9810 pu32Reg[1] = 0; /* implicitly clear the high bit. 
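/* Sketch of the "implicitly clear the high bit" pattern used by the 32-bit ADD/SUB
 * register macros: write the low dword through a uint32_t pointer and zero the dword
 * above it, mirroring the x86-64 rule that 32-bit writes zero-extend.  Like the real
 * macros, the pu32Reg[1] = 0 step assumes a little-endian host register layout. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t  u64Reg  = UINT64_C(0xDEADBEEFCAFEBABE);   /* stand-in for a guest GPR */
    uint32_t *pu32Reg = (uint32_t *)&u64Reg;             /* alias the low half       */

    *pu32Reg  += 1;        /* the 32-bit operation itself            */
    pu32Reg[1] = 0;        /* implicitly clear the high bits         */

    printf("%#llx\n", (unsigned long long)u64Reg);       /* 0xcafebabf on little-endian */
    return 0;
}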
*/ \ 9808 9811 } while (0) 9809 #define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) += (a_u64Value)9810 9811 #define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(p IemCpu, (a_iGReg)) -= (a_u8Value)9812 #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(p IemCpu, (a_iGReg)) -= (a_u16Value)9812 #define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) += (a_u64Value) 9813 9814 #define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pVCpu, (a_iGReg)) -= (a_u8Value) 9815 #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) -= (a_u16Value) 9813 9816 #define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \ 9814 9817 do { \ 9815 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(p IemCpu, (a_iGReg)); \9818 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pVCpu, (a_iGReg)); \ 9816 9819 *pu32Reg -= (a_u32Value); \ 9817 9820 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \ 9818 9821 } while (0) 9819 #define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) -= (a_u64Value)9822 #define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) -= (a_u64Value) 9820 9823 #define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0) 9821 9824 9822 #define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( p IemCpu, (a_iGReg)); } while (0)9823 #define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(p IemCpu, (a_iGReg)); } while (0)9824 #define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(p IemCpu, (a_iGReg)); } while (0)9825 #define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(p IemCpu, (a_iGReg)); } while (0)9825 #define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0) 9826 #define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0) 9827 #define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0) 9828 #define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0) 9826 9829 #define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0) 9827 9830 #define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0) … … 9853 9856 #define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0) 9854 9857 9855 #define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(p IemCpu, (a_iGReg)) &= (a_u8Value)9856 #define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(p IemCpu, (a_iGReg)) &= (a_u16Value)9858 #define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pVCpu, (a_iGReg)) &= (a_u8Value) 9859 #define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) &= (a_u16Value) 9857 9860 #define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \ 9858 9861 do { \ 9859 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(p IemCpu, (a_iGReg)); \9862 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pVCpu, (a_iGReg)); \ 9860 9863 *pu32Reg &= (a_u32Value); \ 9861 9864 pu32Reg[1] = 0; /* implicitly clear the high bit. 
*/ \ 9862 9865 } while (0) 9863 #define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) &= (a_u64Value)9864 9865 #define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(p IemCpu, (a_iGReg)) |= (a_u8Value)9866 #define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(p IemCpu, (a_iGReg)) |= (a_u16Value)9866 #define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) &= (a_u64Value) 9867 9868 #define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pVCpu, (a_iGReg)) |= (a_u8Value) 9869 #define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pVCpu, (a_iGReg)) |= (a_u16Value) 9867 9870 #define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \ 9868 9871 do { \ 9869 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(p IemCpu, (a_iGReg)); \9872 uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pVCpu, (a_iGReg)); \ 9870 9873 *pu32Reg |= (a_u32Value); \ 9871 9874 pu32Reg[1] = 0; /* implicitly clear the high bit. */ \ 9872 9875 } while (0) 9873 #define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) |= (a_u64Value)9876 #define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) |= (a_u64Value) 9874 9877 9875 9878 9876 9879 /** @note Not for IOPL or IF modification. */ 9877 #define IEM_MC_SET_EFL_BIT(a_fBit) do { (p IemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)9880 #define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0) 9878 9881 /** @note Not for IOPL or IF modification. */ 9879 #define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (p IemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)9882 #define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0) 9880 9883 /** @note Not for IOPL or IF modification. 
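/* Quick, self-contained illustration of the three EFLAGS bit macros nearby: plain
 * masking on a 32-bit flags image, which is why the notes say they are not meant
 * for IOPL or IF (those need privilege checks handled elsewhere).  The flag values
 * are the architectural ones. */
#include <stdint.h>
#include <stdio.h>

#define X86_EFL_CF  UINT32_C(0x00000001)
#define X86_EFL_ZF  UINT32_C(0x00000040)

int main(void)
{
    uint32_t fEfl = UINT32_C(0x00000002);   /* reserved bit 1 is always set */

    fEfl |=  X86_EFL_CF;                    /* like IEM_MC_SET_EFL_BIT      */
    fEfl &= ~X86_EFL_ZF;                    /* like IEM_MC_CLEAR_EFL_BIT    */
    fEfl ^=  X86_EFL_CF;                    /* like IEM_MC_FLIP_EFL_BIT     */

    printf("eflags=%#x\n", fEfl);           /* back to 0x2 */
    return 0;
}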
*/ 9881 #define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (p IemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)9882 9883 #define IEM_MC_CLEAR_FSW_EX() do { (p IemCpu)->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)9884 #define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0) 9885 9886 #define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0) 9884 9887 9885 9888 9886 9889 #define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \ 9887 do { (a_u64Value) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)9890 do { (a_u64Value) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0) 9888 9891 #define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \ 9889 do { (a_u32Value) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)9892 do { (a_u32Value) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0) 9890 9893 #define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \ 9891 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)9894 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0) 9892 9895 #define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \ 9893 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)9896 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0) 9894 9897 #define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \ 9895 (a_pu64Dst) = (&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)9898 (a_pu64Dst) = (&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx) 9896 9899 #define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \ 9897 (a_pu64Dst) = ((uint64_t const *)&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)9900 (a_pu64Dst) = ((uint64_t const *)&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx) 9898 9901 #define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \ 9899 (a_pu32Dst) = ((uint32_t const *)&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)9902 (a_pu32Dst) = ((uint32_t const *)&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx) 9900 9903 9901 9904 #define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \ 9902 do { (a_u128Value) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)9905 do { (a_u128Value) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0) 9903 9906 #define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \ 9904 do { (a_u64Value) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)9907 do { (a_u64Value) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0) 9905 9908 #define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \ 9906 do { (a_u32Value) = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)9909 do { (a_u32Value) = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0) 9907 9910 #define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \ 9908 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } 
while (0)9911 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0) 9909 9912 #define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \ 9910 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)9913 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0) 9911 9914 #define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \ 9912 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \9913 p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \9915 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \ 9916 pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \ 9914 9917 } while (0) 9915 9918 #define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \ 9916 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \9917 p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \9919 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \ 9920 pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \ 9918 9921 } while (0) 9919 9922 #define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \ 9920 (a_pu128Dst) = (&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)9923 (a_pu128Dst) = (&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm) 9921 9924 #define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \ 9922 (a_pu128Dst) = ((uint128_t const *)&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)9925 (a_pu128Dst) = ((uint128_t const *)&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm) 9923 9926 #define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \ 9924 (a_pu64Dst) = ((uint64_t const *)&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])9927 (a_pu64Dst) = ((uint64_t const *)&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]) 9925 9928 #define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \ 9926 do { p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \9927 = p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)9929 do { pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \ 9930 = pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0) 9928 9931 9929 9932 #ifndef IEM_WITH_SETJMP 9930 9933 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 9931 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))9934 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem))) 9932 9935 # define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \ 9933 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))9936 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16))) 9934 9937 # define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \ 9935 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))9938 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32))) 9936 9939 #else 9937 9940 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \ 9938 ((a_u8Dst) = 
iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9941 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9939 9942 # define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \ 9940 ((a_u8Dst) = iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem16)))9943 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16))) 9941 9944 # define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \ 9942 ((a_u8Dst) = iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem32)))9945 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32))) 9943 9946 #endif 9944 9947 9945 9948 #ifndef IEM_WITH_SETJMP 9946 9949 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 9947 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))9950 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem))) 9948 9951 # define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9949 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))9952 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9950 9953 # define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \ 9951 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))9954 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem))) 9952 9955 #else 9953 9956 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 9954 ((a_u16Dst) = iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9957 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9955 9958 # define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9956 ((a_u16Dst) = iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))9959 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9957 9960 # define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \ 9958 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9961 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9959 9962 #endif 9960 9963 9961 9964 #ifndef IEM_WITH_SETJMP 9962 9965 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 9963 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))9966 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem))) 9964 9967 # define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9965 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))9968 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9966 9969 # define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \ 9967 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))9970 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem))) 9968 9971 #else 9969 9972 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 9970 ((a_u32Dst) = iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9973 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9971 9974 # define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9972 ((a_u32Dst) = iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem) + 
(a_offDisp)))9975 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9973 9976 # define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \ 9974 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9977 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9975 9978 #endif 9976 9979 9977 9980 #ifdef SOME_UNUSED_FUNCTION 9978 9981 # define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 9979 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(p IemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))9982 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) 9980 9983 #endif 9981 9984 9982 9985 #ifndef IEM_WITH_SETJMP 9983 9986 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 9984 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(p IemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))9987 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) 9985 9988 # define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9986 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(p IemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))9989 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9987 9990 # define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \ 9988 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(p IemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))9991 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem))) 9989 9992 # define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \ 9990 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(p IemCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))9993 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem))) 9991 9994 #else 9992 9995 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 9993 ((a_u64Dst) = iemMemFetchDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))9996 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9994 9997 # define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \ 9995 ((a_u64Dst) = iemMemFetchDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))9998 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp))) 9996 9999 # define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \ 9997 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10000 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 9998 10001 # define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \ 9999 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10002 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10000 10003 #endif 10001 10004 10002 10005 #ifndef IEM_WITH_SETJMP 10003 10006 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ 10004 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))10007 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem))) 10005 10008 # define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \ 10006 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(p IemCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))10009 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem))) 10007 10010 # define 
IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \ 10008 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(p IemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))10011 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))) 10009 10012 #else 10010 10013 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \ 10011 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10014 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10012 10015 # define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \ 10013 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10016 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10014 10017 # define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \ 10015 iemMemFetchDataR80Jmp(p IemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))10018 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)) 10016 10019 #endif 10017 10020 10018 10021 #ifndef IEM_WITH_SETJMP 10019 10022 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ 10020 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(p IemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))10023 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) 10021 10024 # define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ 10022 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(p IemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))10025 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))) 10023 10026 #else 10024 10027 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \ 10025 iemMemFetchDataU128Jmp(p IemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))10028 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)) 10026 10029 # define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \ 10027 iemMemFetchDataU128AlignedSseJmp(p IemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))10030 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)) 10028 10031 #endif 10029 10032 … … 10034 10037 do { \ 10035 10038 uint8_t u8Tmp; \ 10036 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10039 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10037 10040 (a_u16Dst) = u8Tmp; \ 10038 10041 } while (0) … … 10040 10043 do { \ 10041 10044 uint8_t u8Tmp; \ 10042 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10045 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10043 10046 (a_u32Dst) = u8Tmp; \ 10044 10047 } while (0) … … 10046 10049 do { \ 10047 10050 uint8_t u8Tmp; \ 10048 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10051 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10049 10052 (a_u64Dst) = u8Tmp; \ 10050 10053 } while (0) … … 10052 10055 do { \ 10053 10056 uint16_t u16Tmp; \ 10054 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \10057 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 10055 10058 (a_u32Dst) = u16Tmp; \ 10056 10059 } while (0) … … 10058 10061 do { \ 10059 10062 uint16_t u16Tmp; \ 10060 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \10063 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), 
(a_GCPtrMem))); \ 10061 10064 (a_u64Dst) = u16Tmp; \ 10062 10065 } while (0) … … 10064 10067 do { \ 10065 10068 uint32_t u32Tmp; \ 10066 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \10069 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \ 10067 10070 (a_u64Dst) = u32Tmp; \ 10068 10071 } while (0) 10069 10072 #else /* IEM_WITH_SETJMP */ 10070 10073 # define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 10071 ((a_u16Dst) = iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10074 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10072 10075 # define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 10073 ((a_u32Dst) = iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10076 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10074 10077 # define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10075 ((a_u64Dst) = iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10078 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10076 10079 # define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 10077 ((a_u32Dst) = iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10080 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10078 10081 # define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10079 ((a_u64Dst) = iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10082 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10080 10083 # define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10081 ((a_u64Dst) = iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10084 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10082 10085 #endif /* IEM_WITH_SETJMP */ 10083 10086 … … 10086 10089 do { \ 10087 10090 uint8_t u8Tmp; \ 10088 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10091 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10089 10092 (a_u16Dst) = (int8_t)u8Tmp; \ 10090 10093 } while (0) … … 10092 10095 do { \ 10093 10096 uint8_t u8Tmp; \ 10094 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10097 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10095 10098 (a_u32Dst) = (int8_t)u8Tmp; \ 10096 10099 } while (0) … … 10098 10101 do { \ 10099 10102 uint8_t u8Tmp; \ 10100 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(p IemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \10103 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \ 10101 10104 (a_u64Dst) = (int8_t)u8Tmp; \ 10102 10105 } while (0) … … 10104 10107 do { \ 10105 10108 uint16_t u16Tmp; \ 10106 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \10109 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 10107 10110 (a_u32Dst) = (int16_t)u16Tmp; \ 10108 10111 } while (0) … … 10110 10113 do { \ 10111 10114 uint16_t u16Tmp; \ 10112 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(p IemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \10115 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \ 10113 10116 (a_u64Dst) = (int16_t)u16Tmp; \ 10114 10117 } while (0) … … 10116 10119 do { \ 10117 10120 uint32_t u32Tmp; \ 10118 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(p IemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \10121 
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \ 10119 10122 (a_u64Dst) = (int32_t)u32Tmp; \ 10120 10123 } while (0) 10121 10124 #else /* IEM_WITH_SETJMP */ 10122 10125 # define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \ 10123 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10126 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10124 10127 # define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 10125 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10128 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10126 10129 # define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10127 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10130 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10128 10131 # define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \ 10129 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10132 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10130 10133 # define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10131 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10134 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10132 10135 # define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \ 10133 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem)))10136 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem))) 10134 10137 #endif /* IEM_WITH_SETJMP */ 10135 10138 10136 10139 #ifndef IEM_WITH_SETJMP 10137 10140 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 10138 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))10141 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))) 10139 10142 # define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \ 10140 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))10143 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))) 10141 10144 # define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \ 10142 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))10145 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))) 10143 10146 # define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \ 10144 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))10147 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))) 10145 10148 #else 10146 10149 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \ 10147 iemMemStoreDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))10150 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)) 10148 10151 # define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \ 10149 iemMemStoreDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))10152 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)) 10150 10153 # define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \ 10151 iemMemStoreDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))10154 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)) 10152 10155 # define 
IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \ 10153 iemMemStoreDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))10156 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)) 10154 10157 #endif 10155 10158 10156 10159 #ifndef IEM_WITH_SETJMP 10157 10160 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \ 10158 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))10161 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))) 10159 10162 # define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \ 10160 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))10163 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))) 10161 10164 # define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \ 10162 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))10165 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))) 10163 10166 # define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \ 10164 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))10167 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))) 10165 10168 #else 10166 10169 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \ 10167 iemMemStoreDataU8Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))10170 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)) 10168 10171 # define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \ 10169 iemMemStoreDataU16Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))10172 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)) 10170 10173 # define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \ 10171 iemMemStoreDataU32Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))10174 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)) 10172 10175 # define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \ 10173 iemMemStoreDataU64Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))10176 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)) 10174 10177 #endif 10175 10178 … … 10188 10191 #ifndef IEM_WITH_SETJMP 10189 10192 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ 10190 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))10193 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))) 10191 10194 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ 10192 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))10195 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))) 10193 10196 #else 10194 10197 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \ 10195 iemMemStoreDataU128Jmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))10198 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)) 10196 10199 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \ 10197 iemMemStoreDataU128AlignedSseJmp(p IemCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))10200 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)) 10198 10201 #endif 10199 10202 10200 10203 10201 10204 #define IEM_MC_PUSH_U16(a_u16Value) \ 10202 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(p IemCpu, (a_u16Value)))10205 
IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value))) 10203 10206 #define IEM_MC_PUSH_U32(a_u32Value) \ 10204 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(p IemCpu, (a_u32Value)))10207 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value))) 10205 10208 #define IEM_MC_PUSH_U32_SREG(a_u32Value) \ 10206 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(p IemCpu, (a_u32Value)))10209 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value))) 10207 10210 #define IEM_MC_PUSH_U64(a_u64Value) \ 10208 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(p IemCpu, (a_u64Value)))10211 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value))) 10209 10212 10210 10213 #define IEM_MC_POP_U16(a_pu16Value) \ 10211 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(p IemCpu, (a_pu16Value)))10214 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value))) 10212 10215 #define IEM_MC_POP_U32(a_pu32Value) \ 10213 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(p IemCpu, (a_pu32Value)))10216 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value))) 10214 10217 #define IEM_MC_POP_U64(a_pu64Value) \ 10215 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(p IemCpu, (a_pu64Value)))10218 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value))) 10216 10219 10217 10220 /** Maps guest memory for direct or bounce buffered access. … … 10220 10223 */ 10221 10224 #define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \ 10222 IEM_MC_RETURN_ON_FAILURE(iemMemMap(p IemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))10225 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess))) 10223 10226 10224 10227 /** Maps guest memory for direct or bounce buffered access. … … 10227 10230 */ 10228 10231 #define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \ 10229 IEM_MC_RETURN_ON_FAILURE(iemMemMap(p IemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))10232 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess))) 10230 10233 10231 10234 /** Commits the memory and unmaps the guest memory. 
… … 10233 10236 */ 10234 10237 #define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \ 10235 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(p IemCpu, (a_pvMem), (a_fAccess)))10238 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))) 10236 10239 10237 10240 /** Commits the memory and unmaps the guest memory unless the FPU status word … … 10248 10251 if ( !(a_u16FSW & X86_FSW_ES) \ 10249 10252 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 10250 & ~(p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \10251 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(p IemCpu, (a_pvMem), (a_fAccess))); \10253 & ~(pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \ 10254 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \ 10252 10255 } while (0) 10253 10256 … … 10255 10258 #ifndef IEM_WITH_SETJMP 10256 10259 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \ 10257 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(p IemCpu, (bRm), (cbImm), &(a_GCPtrEff)))10260 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff))) 10258 10261 #else 10259 10262 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \ 10260 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(p IemCpu, (bRm), (cbImm)))10263 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm))) 10261 10264 #endif 10262 10265 … … 10276 10279 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0. 10277 10280 */ 10278 #define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode)10281 #define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode) 10279 10282 10280 10283 /** … … 10285 10288 * @param a0 The argument. 10286 10289 */ 10287 #define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0)10290 #define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0) 10288 10291 10289 10292 /** … … 10295 10298 * @param a1 The second extra argument. 10296 10299 */ 10297 #define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1)10300 #define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1) 10298 10301 10299 10302 /** … … 10306 10309 * @param a2 The third extra argument. 10307 10310 */ 10308 #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1, a2)10311 #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2) 10309 10312 10310 10313 /** … … 10318 10321 * @param a3 The fourth extra argument. 10319 10322 */ 10320 #define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)10323 #define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2, a3) 10321 10324 10322 10325 /** … … 10331 10334 * @param a4 The fifth extra argument. 10332 10335 */ 10333 #define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)10336 #define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2, a3, a4) 10334 10337 10335 10338 /** … … 10342 10345 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0. 
10343 10346 */ 10344 #define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode)10347 #define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode) 10345 10348 10346 10349 /** … … 10353 10356 * @param a0 The argument. 10354 10357 */ 10355 #define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0)10358 #define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0) 10356 10359 10357 10360 /** … … 10365 10368 * @param a1 The second extra argument. 10366 10369 */ 10367 #define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1)10370 #define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1) 10368 10371 10369 10372 /** … … 10378 10381 * @param a2 The third extra argument. 10379 10382 */ 10380 #define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(p IemCpu, pIemCpu->offOpcode, a0, a1, a2)10383 #define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, pVCpu->iem.s.offOpcode, a0, a1, a2) 10381 10384 10382 10385 /** … … 10388 10391 #define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \ 10389 10392 do { \ 10390 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \10393 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0)); \ 10391 10394 } while (0) 10392 10395 … … 10400 10403 #define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \ 10401 10404 do { \ 10402 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \10405 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \ 10403 10406 } while (0) 10404 10407 … … 10413 10416 #define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ 10414 10417 do { \ 10415 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \10418 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \ 10416 10419 } while (0) 10417 10420 … … 10424 10427 /** Pushes FPU result onto the stack. */ 10425 10428 #define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \ 10426 iemFpuPushResult(p IemCpu, &a_FpuData)10429 iemFpuPushResult(pVCpu, &a_FpuData) 10427 10430 /** Pushes FPU result onto the stack and sets the FPUDP. */ 10428 10431 #define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \ 10429 iemFpuPushResultWithMemOp(p IemCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)10432 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff) 10430 10433 10431 10434 /** Replaces ST0 with value one and pushes value 2 onto the FPU stack. */ 10432 10435 #define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \ 10433 iemFpuPushResultTwo(p IemCpu, &a_FpuDataTwo)10436 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo) 10434 10437 10435 10438 /** Stores FPU result in a stack register. */ 10436 10439 #define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \ 10437 iemFpuStoreResult(p IemCpu, &a_FpuData, a_iStReg)10440 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg) 10438 10441 /** Stores FPU result in a stack register and pops the stack. */ 10439 10442 #define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \ 10440 iemFpuStoreResultThenPop(p IemCpu, &a_FpuData, a_iStReg)10443 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg) 10441 10444 /** Stores FPU result in a stack register and sets the FPUDP. 
*/ 10442 10445 #define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \ 10443 iemFpuStoreResultWithMemOp(p IemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)10446 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) 10444 10447 /** Stores FPU result in a stack register, sets the FPUDP, and pops the 10445 10448 * stack. */ 10446 10449 #define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \ 10447 iemFpuStoreResultWithMemOpThenPop(p IemCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)10450 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) 10448 10451 10449 10452 /** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */ 10450 10453 #define IEM_MC_UPDATE_FPU_OPCODE_IP() \ 10451 iemFpuUpdateOpcodeAndIp(p IemCpu)10454 iemFpuUpdateOpcodeAndIp(pVCpu) 10452 10455 /** Free a stack register (for FFREE and FFREEP). */ 10453 10456 #define IEM_MC_FPU_STACK_FREE(a_iStReg) \ 10454 iemFpuStackFree(p IemCpu, a_iStReg)10457 iemFpuStackFree(pVCpu, a_iStReg) 10455 10458 /** Increment the FPU stack pointer. */ 10456 10459 #define IEM_MC_FPU_STACK_INC_TOP() \ 10457 iemFpuStackIncTop(p IemCpu)10460 iemFpuStackIncTop(pVCpu) 10458 10461 /** Decrement the FPU stack pointer. */ 10459 10462 #define IEM_MC_FPU_STACK_DEC_TOP() \ 10460 iemFpuStackDecTop(p IemCpu)10463 iemFpuStackDecTop(pVCpu) 10461 10464 10462 10465 /** Updates the FSW, FOP, FPUIP, and FPUCS. */ 10463 10466 #define IEM_MC_UPDATE_FSW(a_u16FSW) \ 10464 iemFpuUpdateFSW(p IemCpu, a_u16FSW)10467 iemFpuUpdateFSW(pVCpu, a_u16FSW) 10465 10468 /** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */ 10466 10469 #define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \ 10467 iemFpuUpdateFSW(p IemCpu, a_u16FSW)10470 iemFpuUpdateFSW(pVCpu, a_u16FSW) 10468 10471 /** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */ 10469 10472 #define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \ 10470 iemFpuUpdateFSWWithMemOp(p IemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)10473 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff) 10471 10474 /** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */ 10472 10475 #define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \ 10473 iemFpuUpdateFSWThenPop(p IemCpu, a_u16FSW)10476 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW) 10474 10477 /** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the 10475 10478 * stack. */ 10476 10479 #define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \ 10477 iemFpuUpdateFSWWithMemOpThenPop(p IemCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)10480 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff) 10478 10481 /** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */ 10479 10482 #define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \ 10480 iemFpuUpdateFSWThenPop(p IemCpu, a_u16FSW)10483 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW) 10481 10484 10482 10485 /** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */ 10483 10486 #define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \ 10484 iemFpuStackUnderflow(p IemCpu, a_iStDst)10487 iemFpuStackUnderflow(pVCpu, a_iStDst) 10485 10488 /** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops 10486 10489 * stack. 
*/ 10487 10490 #define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \ 10488 iemFpuStackUnderflowThenPop(p IemCpu, a_iStDst)10491 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst) 10489 10492 /** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and 10490 10493 * FPUDS. */ 10491 10494 #define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \ 10492 iemFpuStackUnderflowWithMemOp(p IemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)10495 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff) 10493 10496 /** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and 10494 10497 * FPUDS. Pops stack. */ 10495 10498 #define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \ 10496 iemFpuStackUnderflowWithMemOpThenPop(p IemCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)10499 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff) 10497 10500 /** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops 10498 10501 * stack twice. */ 10499 10502 #define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \ 10500 iemFpuStackUnderflowThenPopPop(p IemCpu)10503 iemFpuStackUnderflowThenPopPop(pVCpu) 10501 10504 /** Raises a FPU stack underflow exception for an instruction pushing a result 10502 10505 * value onto the stack. Sets FPUIP, FPUCS and FOP. */ 10503 10506 #define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \ 10504 iemFpuStackPushUnderflow(p IemCpu)10507 iemFpuStackPushUnderflow(pVCpu) 10505 10508 /** Raises a FPU stack underflow exception for an instruction pushing a result 10506 10509 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */ 10507 10510 #define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \ 10508 iemFpuStackPushUnderflowTwo(p IemCpu)10511 iemFpuStackPushUnderflowTwo(pVCpu) 10509 10512 10510 10513 /** Raises a FPU stack overflow exception as part of a push attempt. Sets 10511 10514 * FPUIP, FPUCS and FOP. */ 10512 10515 #define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \ 10513 iemFpuStackPushOverflow(p IemCpu)10516 iemFpuStackPushOverflow(pVCpu) 10514 10517 /** Raises a FPU stack overflow exception as part of a push attempt. Sets 10515 10518 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */ 10516 10519 #define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \ 10517 iemFpuStackPushOverflowWithMemOp(p IemCpu, a_iEffSeg, a_GCPtrEff)10520 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff) 10518 10521 /** Prepares for using the FPU state. 10519 10522 * Ensures that we can use the host FPU in the current context (RC+R0. 10520 10523 * Ensures the guest FPU state in the CPUMCTX is up to date. */ 10521 #define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(p IemCpu)10524 #define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu) 10522 10525 /** Actualizes the guest FPU state so it can be accessed read-only fashion. */ 10523 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(p IemCpu)10526 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu) 10524 10527 /** Actualizes the guest FPU state so it can be accessed and modified. */ 10525 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(p IemCpu)10528 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu) 10526 10529 10527 10530 /** Prepares for using the SSE state. 10528 10531 * Ensures that we can use the host SSE/FPU in the current context (RC+R0. 10529 10532 * Ensures the guest SSE state in the CPUMCTX is up to date. 
*/ 10530 #define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(p IemCpu)10533 #define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu) 10531 10534 /** Actualizes the guest XMM0..15 register state for read-only access. */ 10532 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(p IemCpu)10535 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu) 10533 10536 /** Actualizes the guest XMM0..15 register state for read-write access. */ 10534 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(p IemCpu)10537 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu) 10535 10538 10536 10539 /** … … 10544 10547 do { \ 10545 10548 IEM_MC_PREPARE_FPU_USAGE(); \ 10546 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \10549 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \ 10547 10550 } while (0) 10548 10551 … … 10558 10561 do { \ 10559 10562 IEM_MC_PREPARE_FPU_USAGE(); \ 10560 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \10563 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \ 10561 10564 } while (0) 10562 10565 … … 10572 10575 do { \ 10573 10576 IEM_MC_PREPARE_SSE_USAGE(); \ 10574 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \10577 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1)); \ 10575 10578 } while (0) 10576 10579 … … 10586 10589 do { \ 10587 10590 IEM_MC_PREPARE_SSE_USAGE(); \ 10588 a_pfnAImpl(&p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \10591 a_pfnAImpl(&pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \ 10589 10592 } while (0) 10590 10593 10591 10594 /** @note Not for IOPL or IF testing. */ 10592 #define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {10595 #define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit)) { 10593 10596 /** @note Not for IOPL or IF testing. */ 10594 #define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit))) {10597 #define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit))) { 10595 10598 /** @note Not for IOPL or IF testing. */ 10596 #define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {10599 #define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBits)) { 10597 10600 /** @note Not for IOPL or IF testing. */ 10598 #define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits))) {10601 #define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBits))) { 10599 10602 /** @note Not for IOPL or IF testing. */ 10600 10603 #define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \ 10601 if ( !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \10602 != !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {10604 if ( !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \ 10605 != !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) { 10603 10606 /** @note Not for IOPL or IF testing. 
*/ 10604 10607 #define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \ 10605 if ( !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \10606 == !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {10608 if ( !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \ 10609 == !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) { 10607 10610 /** @note Not for IOPL or IF testing. */ 10608 10611 #define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \ 10609 if ( (p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \10610 || !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \10611 != !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {10612 if ( (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \ 10613 || !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \ 10614 != !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) { 10612 10615 /** @note Not for IOPL or IF testing. */ 10613 10616 #define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \ 10614 if ( !(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \10615 && !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \10616 == !!(p IemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {10617 #define IEM_MC_IF_CX_IS_NZ() if (p IemCpu->CTX_SUFF(pCtx)->cx != 0) {10618 #define IEM_MC_IF_ECX_IS_NZ() if (p IemCpu->CTX_SUFF(pCtx)->ecx != 0) {10619 #define IEM_MC_IF_RCX_IS_NZ() if (p IemCpu->CTX_SUFF(pCtx)->rcx != 0) {10617 if ( !(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \ 10618 && !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \ 10619 == !!(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) { 10620 #define IEM_MC_IF_CX_IS_NZ() if (pVCpu->iem.s.CTX_SUFF(pCtx)->cx != 0) { 10621 #define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->iem.s.CTX_SUFF(pCtx)->ecx != 0) { 10622 #define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->iem.s.CTX_SUFF(pCtx)->rcx != 0) { 10620 10623 /** @note Not for IOPL or IF testing. */ 10621 10624 #define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \ 10622 if ( p IemCpu->CTX_SUFF(pCtx)->cx != 0 \10623 && (p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10625 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->cx != 0 \ 10626 && (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10624 10627 /** @note Not for IOPL or IF testing. */ 10625 10628 #define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \ 10626 if ( p IemCpu->CTX_SUFF(pCtx)->ecx != 0 \10627 && (p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10629 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->ecx != 0 \ 10630 && (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10628 10631 /** @note Not for IOPL or IF testing. */ 10629 10632 #define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \ 10630 if ( p IemCpu->CTX_SUFF(pCtx)->rcx != 0 \10631 && (p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10633 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->rcx != 0 \ 10634 && (pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10632 10635 /** @note Not for IOPL or IF testing. */ 10633 10636 #define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \ 10634 if ( p IemCpu->CTX_SUFF(pCtx)->cx != 0 \10635 && !(p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10637 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->cx != 0 \ 10638 && !(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10636 10639 /** @note Not for IOPL or IF testing. 
*/ 10637 10640 #define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \ 10638 if ( p IemCpu->CTX_SUFF(pCtx)->ecx != 0 \10639 && !(p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10641 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->ecx != 0 \ 10642 && !(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10640 10643 /** @note Not for IOPL or IF testing. */ 10641 10644 #define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \ 10642 if ( p IemCpu->CTX_SUFF(pCtx)->rcx != 0 \10643 && !(p IemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {10645 if ( pVCpu->iem.s.CTX_SUFF(pCtx)->rcx != 0 \ 10646 && !(pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.u & a_fBit)) { 10644 10647 #define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) { 10645 #define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(p IemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {10648 #define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) { 10646 10649 10647 10650 #define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \ 10648 if (iemFpuStRegNotEmpty(p IemCpu, (a_iSt)) == VINF_SUCCESS) {10651 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) { 10649 10652 #define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \ 10650 if (iemFpuStRegNotEmpty(p IemCpu, (a_iSt)) != VINF_SUCCESS) {10653 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) { 10651 10654 #define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \ 10652 if (iemFpuStRegNotEmptyRef(p IemCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {10655 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) { 10653 10656 #define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \ 10654 if (iemFpu2StRegsNotEmptyRef(p IemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {10657 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) { 10655 10658 #define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \ 10656 if (iemFpu2StRegsNotEmptyRefFirst(p IemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {10659 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) { 10657 10660 #define IEM_MC_IF_FCW_IM() \ 10658 if (p IemCpu->CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {10661 if (pVCpu->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) { 10659 10662 10660 10663 #define IEM_MC_ELSE() } else { … … 10669 10672 #ifdef DEBUG 10670 10673 # define IEMOP_MNEMONIC(a_szMnemonic) \ 10671 Log4(("decode - %04x:%RGv %s%s [#%u]\n", p IemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \10672 p IemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pIemCpu->cInstructions))10674 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->iem.s.CTX_SUFF(pCtx)->cs.Sel, pVCpu->iem.s.CTX_SUFF(pCtx)->rip, \ 10675 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)) 10673 10676 # define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \ 10674 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", p IemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \10675 p IemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))10677 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pVCpu->iem.s.CTX_SUFF(pCtx)->cs.Sel, pVCpu->iem.s.CTX_SUFF(pCtx)->rip, \ 10678 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? 
"lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions)) 10676 10679 #else 10677 10680 # define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0) … … 10689 10692 # define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \ 10690 10693 do { \ 10691 if (IEM_GET_TARGET_CPU(p IemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \10694 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \ 10692 10695 else \ 10693 10696 { \ 10694 DBGFSTOP( IEMCPU_TO_VM(pIemCpu)); \10697 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \ 10695 10698 return IEMOP_RAISE_INVALID_OPCODE(); \ 10696 10699 } \ … … 10699 10702 # define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \ 10700 10703 do { \ 10701 if (IEM_GET_TARGET_CPU(p IemCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \10704 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \ 10702 10705 else return IEMOP_RAISE_INVALID_OPCODE(); \ 10703 10706 } while (0) … … 10758 10761 do \ 10759 10762 { \ 10760 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu)) \10763 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \ 10761 10764 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \ 10762 10765 } while (0) … … 10768 10771 do \ 10769 10772 { \ 10770 if (p IemCpu->fPrefixes & IEM_OP_PRF_LOCK) \10773 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) \ 10771 10774 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \ 10772 10775 } while (0) … … 10777 10780 do \ 10778 10781 { \ 10779 if (p IemCpu->enmCpuMode == IEMMODE_64BIT) \10782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \ 10780 10783 return IEMOP_RAISE_INVALID_OPCODE(); \ 10781 10784 } while (0) … … 10786 10789 do \ 10787 10790 { \ 10788 if (p IemCpu->enmCpuMode != IEMMODE_64BIT) \10791 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \ 10789 10792 return IEMOP_RAISE_INVALID_OPCODE(); \ 10790 10793 } while (0) … … 10794 10797 do \ 10795 10798 { \ 10796 if (p IemCpu->enmCpuMode == IEMMODE_64BIT) \10797 iemRecalEffOpSize64Default(p IemCpu); \10799 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \ 10800 iemRecalEffOpSize64Default(pVCpu); \ 10798 10801 } while (0) 10799 10802 … … 10802 10805 do \ 10803 10806 { \ 10804 if (p IemCpu->enmCpuMode == IEMMODE_64BIT) \10805 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT; \10807 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \ 10808 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \ 10806 10809 } while (0) 10807 10810 … … 10811 10814 do \ 10812 10815 { \ 10813 if (RT_UNLIKELY(p IemCpu->fPrefixes & IEM_OP_PRF_REX)) \10816 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \ 10814 10817 { \ 10815 10818 Log5((a_szPrf ": Overriding REX prefix at %RX16! 
fPrefixes=%#x\n", \ 10816 p IemCpu->CTX_SUFF(pCtx)->rip, pIemCpu->fPrefixes)); \10817 p IemCpu->fPrefixes &= ~IEM_OP_PRF_REX_MASK; \10818 p IemCpu->uRexB = 0; \10819 p IemCpu->uRexIndex = 0; \10820 p IemCpu->uRexReg = 0; \10821 iemRecalEffOpSize(p IemCpu); \10819 pVCpu->iem.s.CTX_SUFF(pCtx)->rip, pVCpu->iem.s.fPrefixes)); \ 10820 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \ 10821 pVCpu->iem.s.uRexB = 0; \ 10822 pVCpu->iem.s.uRexIndex = 0; \ 10823 pVCpu->iem.s.uRexReg = 0; \ 10824 iemRecalEffOpSize(pVCpu); \ 10822 10825 } \ 10823 10826 } while (0) … … 10838 10841 do \ 10839 10842 { \ 10840 if (RT_LIKELY(!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \10843 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \ 10841 10844 { /* likely */ } \ 10842 10845 else \ … … 10846 10849 do \ 10847 10850 { \ 10848 if (RT_LIKELY(!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \10851 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \ 10849 10852 { /* likely */ } \ 10850 10853 else \ … … 10857 10860 do \ 10858 10861 { \ 10859 if (RT_LIKELY(!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))) \10862 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \ 10860 10863 { /* likely */ } \ 10861 10864 else \ … … 10872 10875 do \ 10873 10876 { \ 10874 if (RT_LIKELY(!(p IemCpu->fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \10877 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \ 10875 10878 { /* likely */ } \ 10876 10879 else \ … … 10885 10888 * 10886 10889 * @return Strict VBox status code. 10887 * @param p IemCpu The IEM per CPU data.10890 * @param pVCpu The cross context virtual CPU structure of the calling thread. 10888 10891 * @param bRm The ModRM byte. 10889 10892 * @param cbImm The size of any immediate following the … … 10892 10895 * @param pGCPtrEff Where to return the effective address. 10893 10896 */ 10894 IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(P IEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)10897 IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) 10895 10898 { 10896 10899 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm)); 10897 PCCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);10900 PCCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 10898 10901 # define SET_SS_DEF() \ 10899 10902 do \ 10900 10903 { \ 10901 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \10902 p IemCpu->iEffSeg = X86_SREG_SS; \10904 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \ 10905 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \ 10903 10906 } while (0) 10904 10907 10905 if (p IemCpu->enmCpuMode != IEMMODE_64BIT)10908 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) 10906 10909 { 10907 10910 /** @todo Check the effective address size crap! 
*/ 10908 if (p IemCpu->enmEffAddrMode == IEMMODE_16BIT)10911 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT) 10909 10912 { 10910 10913 uint16_t u16EffAddr; … … 10942 10945 else 10943 10946 { 10944 Assert(p IemCpu->enmEffAddrMode == IEMMODE_32BIT);10947 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 10945 10948 uint32_t u32EffAddr; 10946 10949 … … 11031 11034 11032 11035 } 11033 if (p IemCpu->enmEffAddrMode == IEMMODE_32BIT)11036 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT) 11034 11037 *pGCPtrEff = u32EffAddr; 11035 11038 else 11036 11039 { 11037 Assert(p IemCpu->enmEffAddrMode == IEMMODE_16BIT);11040 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT); 11038 11041 *pGCPtrEff = u32EffAddr & UINT16_MAX; 11039 11042 } … … 11048 11051 { 11049 11052 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 11050 u64EffAddr += pCtx->rip + p IemCpu->offOpcode + cbImm;11053 u64EffAddr += pCtx->rip + pVCpu->iem.s.offOpcode + cbImm; 11051 11054 } 11052 11055 else 11053 11056 { 11054 11057 /* Get the register (or SIB) value. */ 11055 switch ((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB)11058 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 11056 11059 { 11057 11060 case 0: u64EffAddr = pCtx->rax; break; … … 11076 11079 11077 11080 /* Get the index and scale it. */ 11078 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | p IemCpu->uRexIndex)11081 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 11079 11082 { 11080 11083 case 0: u64EffAddr = pCtx->rax; break; … … 11099 11102 11100 11103 /* add base */ 11101 switch ((bSib & X86_SIB_BASE_MASK) | p IemCpu->uRexB)11104 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 11102 11105 { 11103 11106 case 0: u64EffAddr += pCtx->rax; break; … … 11120 11123 if ((bRm & X86_MODRM_MOD_MASK) != 0) 11121 11124 { 11122 if (!p IemCpu->uRexB)11125 if (!pVCpu->iem.s.uRexB) 11123 11126 { 11124 11127 u64EffAddr += pCtx->rbp; … … 11166 11169 } 11167 11170 11168 if (p IemCpu->enmEffAddrMode == IEMMODE_64BIT)11171 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT) 11169 11172 *pGCPtrEff = u64EffAddr; 11170 11173 else 11171 11174 { 11172 Assert(p IemCpu->enmEffAddrMode == IEMMODE_32BIT);11175 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 11173 11176 *pGCPtrEff = u64EffAddr & UINT32_MAX; 11174 11177 } … … 11189 11192 * 11190 11193 * @return The effective address. 11191 * @param p IemCpu The IEM per CPU data.11194 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11192 11195 * @param bRm The ModRM byte. 11193 11196 * @param cbImm The size of any immediate following the … … 11195 11198 * RIP relative addressing. 11196 11199 */ 11197 IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(P IEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm)11200 IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm) 11198 11201 { 11199 11202 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm)); 11200 PCCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);11203 PCCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 11201 11204 # define SET_SS_DEF() \ 11202 11205 do \ 11203 11206 { \ 11204 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \11205 p IemCpu->iEffSeg = X86_SREG_SS; \11207 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \ 11208 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \ 11206 11209 } while (0) 11207 11210 11208 if (p IemCpu->enmCpuMode != IEMMODE_64BIT)11211 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) 11209 11212 { 11210 11213 /** @todo Check the effective address size crap! 
*/ 11211 if (p IemCpu->enmEffAddrMode == IEMMODE_16BIT)11214 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT) 11212 11215 { 11213 11216 uint16_t u16EffAddr; … … 11224 11227 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break; 11225 11228 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break; 11226 default: AssertFailedStmt(longjmp(*p IemCpu->CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */11229 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */ 11227 11230 } 11228 11231 … … 11245 11248 } 11246 11249 11247 Assert(p IemCpu->enmEffAddrMode == IEMMODE_32BIT);11250 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 11248 11251 uint32_t u32EffAddr; 11249 11252 … … 11334 11337 } 11335 11338 11336 if (p IemCpu->enmEffAddrMode == IEMMODE_32BIT)11339 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT) 11337 11340 { 11338 11341 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr)); 11339 11342 return u32EffAddr; 11340 11343 } 11341 Assert(p IemCpu->enmEffAddrMode == IEMMODE_16BIT);11344 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT); 11342 11345 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX)); 11343 11346 return u32EffAddr & UINT16_MAX; … … 11350 11353 { 11351 11354 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 11352 u64EffAddr += pCtx->rip + p IemCpu->offOpcode + cbImm;11355 u64EffAddr += pCtx->rip + pVCpu->iem.s.offOpcode + cbImm; 11353 11356 } 11354 11357 else 11355 11358 { 11356 11359 /* Get the register (or SIB) value. */ 11357 switch ((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB)11360 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 11358 11361 { 11359 11362 case 0: u64EffAddr = pCtx->rax; break; … … 11378 11381 11379 11382 /* Get the index and scale it. */ 11380 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | p IemCpu->uRexIndex)11383 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 11381 11384 { 11382 11385 case 0: u64EffAddr = pCtx->rax; break; … … 11401 11404 11402 11405 /* add base */ 11403 switch ((bSib & X86_SIB_BASE_MASK) | p IemCpu->uRexB)11406 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 11404 11407 { 11405 11408 case 0: u64EffAddr += pCtx->rax; break; … … 11422 11425 if ((bRm & X86_MODRM_MOD_MASK) != 0) 11423 11426 { 11424 if (!p IemCpu->uRexB)11427 if (!pVCpu->iem.s.uRexB) 11425 11428 { 11426 11429 u64EffAddr += pCtx->rbp; … … 11468 11471 } 11469 11472 11470 if (p IemCpu->enmEffAddrMode == IEMMODE_64BIT)11473 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT) 11471 11474 { 11472 11475 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr)); 11473 11476 return u64EffAddr; 11474 11477 } 11475 Assert(p IemCpu->enmEffAddrMode == IEMMODE_32BIT);11478 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT); 11476 11479 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX)); 11477 11480 return u64EffAddr & UINT32_MAX; … … 11497 11500 * Sets up execution verification mode. 11498 11501 */ 11499 IEM_STATIC void iemExecVerificationModeSetup(P IEMCPU pIemCpu)11500 { 11501 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);11502 PCPUMCTX pOrgCtx = p IemCpu->CTX_SUFF(pCtx);11502 IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu) 11503 { 11504 PVMCPU pVCpu = pVCpu; 11505 PCPUMCTX pOrgCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 11503 11506 11504 11507 /* 11505 11508 * Always note down the address of the current instruction. 
11506 11509 */ 11507 p IemCpu->uOldCs = pOrgCtx->cs.Sel;11508 p IemCpu->uOldRip = pOrgCtx->rip;11510 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel; 11511 pVCpu->iem.s.uOldRip = pOrgCtx->rip; 11509 11512 11510 11513 /* … … 11593 11596 fNewNoRem = false; 11594 11597 } 11595 if (fNewNoRem != p IemCpu->fNoRem)11596 { 11597 p IemCpu->fNoRem = fNewNoRem;11598 if (fNewNoRem != pVCpu->iem.s.fNoRem) 11599 { 11600 pVCpu->iem.s.fNoRem = fNewNoRem; 11598 11601 if (!fNewNoRem) 11599 11602 { … … 11608 11611 * Switch state. 11609 11612 */ 11610 if (IEM_VERIFICATION_ENABLED(p IemCpu))11613 if (IEM_VERIFICATION_ENABLED(pVCpu)) 11611 11614 { 11612 11615 static CPUMCTX s_DebugCtx; /* Ugly! */ 11613 11616 11614 11617 s_DebugCtx = *pOrgCtx; 11615 p IemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;11618 pVCpu->iem.s.CTX_SUFF(pCtx) = &s_DebugCtx; 11616 11619 } 11617 11620 … … 11619 11622 * See if there is an interrupt pending in TRPM and inject it if we can. 11620 11623 */ 11621 p IemCpu->uInjectCpl = UINT8_MAX;11624 pVCpu->iem.s.uInjectCpl = UINT8_MAX; 11622 11625 if ( pOrgCtx->eflags.Bits.u1IF 11623 11626 && TRPMHasTrap(pVCpu) … … 11630 11633 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 11631 11634 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 11632 if (!IEM_VERIFICATION_ENABLED(p IemCpu))11635 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 11633 11636 TRPMResetTrap(pVCpu); 11634 p IemCpu->uInjectCpl = pIemCpu->uCpl;11637 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl; 11635 11638 } 11636 11639 … … 11638 11641 * Reset the counters. 11639 11642 */ 11640 p IemCpu->cIOReads = 0;11641 p IemCpu->cIOWrites = 0;11642 p IemCpu->fIgnoreRaxRdx = false;11643 p IemCpu->fOverlappingMovs = false;11644 p IemCpu->fProblematicMemory = false;11645 p IemCpu->fUndefinedEFlags = 0;11646 11647 if (IEM_VERIFICATION_ENABLED(p IemCpu))11643 pVCpu->iem.s.cIOReads = 0; 11644 pVCpu->iem.s.cIOWrites = 0; 11645 pVCpu->iem.s.fIgnoreRaxRdx = false; 11646 pVCpu->iem.s.fOverlappingMovs = false; 11647 pVCpu->iem.s.fProblematicMemory = false; 11648 pVCpu->iem.s.fUndefinedEFlags = 0; 11649 11650 if (IEM_VERIFICATION_ENABLED(pVCpu)) 11648 11651 { 11649 11652 /* 11650 11653 * Free all verification records. 11651 11654 */ 11652 PIEMVERIFYEVTREC pEvtRec = p IemCpu->pIemEvtRecHead;11653 p IemCpu->pIemEvtRecHead = NULL;11654 p IemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;11655 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead; 11656 pVCpu->iem.s.pIemEvtRecHead = NULL; 11657 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead; 11655 11658 do 11656 11659 { … … 11658 11661 { 11659 11662 PIEMVERIFYEVTREC pNext = pEvtRec->pNext; 11660 pEvtRec->pNext = p IemCpu->pFreeEvtRec;11661 p IemCpu->pFreeEvtRec = pEvtRec;11663 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec; 11664 pVCpu->iem.s.pFreeEvtRec = pEvtRec; 11662 11665 pEvtRec = pNext; 11663 11666 } 11664 pEvtRec = p IemCpu->pOtherEvtRecHead;11665 p IemCpu->pOtherEvtRecHead = NULL;11666 p IemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;11667 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead; 11668 pVCpu->iem.s.pOtherEvtRecHead = NULL; 11669 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead; 11667 11670 } while (pEvtRec); 11668 11671 } … … 11674 11677 * @returns Pointer to a record. 
11675 11678 */ 11676 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(P IEMCPU pIemCpu)11677 { 11678 if (!IEM_VERIFICATION_ENABLED(p IemCpu))11679 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu) 11680 { 11681 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 11679 11682 return NULL; 11680 11683 11681 PIEMVERIFYEVTREC pEvtRec = p IemCpu->pFreeEvtRec;11684 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec; 11682 11685 if (pEvtRec) 11683 p IemCpu->pFreeEvtRec = pEvtRec->pNext;11686 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext; 11684 11687 else 11685 11688 { 11686 if (!p IemCpu->ppIemEvtRecNext)11689 if (!pVCpu->iem.s.ppIemEvtRecNext) 11687 11690 return NULL; /* Too early (fake PCIBIOS), ignore notification. */ 11688 11691 11689 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc( IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));11692 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec)); 11690 11693 if (!pEvtRec) 11691 11694 return NULL; … … 11705 11708 if (!pVCpu) 11706 11709 return; 11707 PIEMCPU pIemCpu = &pVCpu->iem.s; 11708 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 11710 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11709 11711 if (!pEvtRec) 11710 11712 return; … … 11712 11714 pEvtRec->u.RamRead.GCPhys = GCPhys; 11713 11715 pEvtRec->u.RamRead.cb = (uint32_t)cbValue; 11714 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11715 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11716 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11717 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11716 11718 } 11717 11719 … … 11725 11727 if (!pVCpu) 11726 11728 return; 11727 PIEMCPU pIemCpu = &pVCpu->iem.s; 11728 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 11729 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11729 11730 if (!pEvtRec) 11730 11731 return; … … 11736 11737 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value); 11737 11738 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value); 11738 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11739 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11739 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11740 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11740 11741 } 11741 11742 … … 11749 11750 if (!pVCpu) 11750 11751 return; 11751 PIEMCPU pIemCpu = &pVCpu->iem.s; 11752 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 11752 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11753 11753 if (!pEvtRec) 11754 11754 return; … … 11756 11756 pEvtRec->u.IOPortRead.Port = Port; 11757 11757 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue; 11758 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11759 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11758 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11759 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11760 11760 } 11761 11761 … … 11768 11768 if (!pVCpu) 11769 11769 return; 11770 PIEMCPU pIemCpu = &pVCpu->iem.s; 11771 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 11770 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11772 11771 if (!pEvtRec) 11773 11772 return; … … 11776 11775 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue; 11777 11776 pEvtRec->u.IOPortWrite.u32Value = u32Value; 11778 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11779 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11777 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11778 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11780 11779 } 11781 11780 … … 11786 11785 if (!pVCpu) 11787 11786 return; 11788 PIEMCPU pIemCpu = &pVCpu->iem.s; 11789 PIEMVERIFYEVTREC pEvtRec = 
iemVerifyAllocRecord(pIemCpu); 11787 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11790 11788 if (!pEvtRec) 11791 11789 return; … … 11794 11792 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue; 11795 11793 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers; 11796 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11797 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11794 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11795 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11798 11796 } 11799 11797 … … 11804 11802 if (!pVCpu) 11805 11803 return; 11806 PIEMCPU pIemCpu = &pVCpu->iem.s; 11807 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu); 11804 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11808 11805 if (!pEvtRec) 11809 11806 return; … … 11812 11809 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue; 11813 11810 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers; 11814 pEvtRec->pNext = *p IemCpu->ppOtherEvtRecNext;11815 *p IemCpu->ppOtherEvtRecNext = pEvtRec;11811 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext; 11812 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec; 11816 11813 } 11817 11814 … … 11821 11818 * 11822 11819 * @returns VINF_SUCCESS. 11823 * @param p IemCpu The IEM per CPU data.11820 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11824 11821 * @param Port The I/O port. 11825 11822 * @param pu32Value Where to store the fake value. 11826 11823 * @param cbValue The size of the access. 11827 11824 */ 11828 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)11829 { 11830 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(p IemCpu);11825 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue) 11826 { 11827 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11831 11828 if (pEvtRec) 11832 11829 { … … 11834 11831 pEvtRec->u.IOPortRead.Port = Port; 11835 11832 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue; 11836 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;11837 *p IemCpu->ppIemEvtRecNext = pEvtRec;11838 } 11839 p IemCpu->cIOReads++;11833 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 11834 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 11835 } 11836 pVCpu->iem.s.cIOReads++; 11840 11837 *pu32Value = 0xcccccccc; 11841 11838 return VINF_SUCCESS; … … 11847 11844 * 11848 11845 * @returns VINF_SUCCESS. 11849 * @param p IemCpu The IEM per CPU data.11846 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11850 11847 * @param Port The I/O port. 11851 11848 * @param u32Value The value being written. 11852 11849 * @param cbValue The size of the access. 
11853 11850 */ 11854 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)11855 { 11856 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(p IemCpu);11851 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue) 11852 { 11853 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 11857 11854 if (pEvtRec) 11858 11855 { … … 11861 11858 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue; 11862 11859 pEvtRec->u.IOPortWrite.u32Value = u32Value; 11863 pEvtRec->pNext = *p IemCpu->ppIemEvtRecNext;11864 *p IemCpu->ppIemEvtRecNext = pEvtRec;11865 } 11866 p IemCpu->cIOWrites++;11860 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 11861 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 11862 } 11863 pVCpu->iem.s.cIOWrites++; 11867 11864 return VINF_SUCCESS; 11868 11865 } … … 11871 11868 /** 11872 11869 * Used to add extra details about a stub case. 11873 * @param p IemCpu The IEM per CPU state.11874 */ 11875 IEM_STATIC void iemVerifyAssertMsg2(P IEMCPU pIemCpu)11876 { 11877 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);11878 PVM pVM = IEMCPU_TO_VM(pIemCpu);11879 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);11870 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11871 */ 11872 IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu) 11873 { 11874 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 11875 PVM pVM = pVCpu->CTX_SUFF(pVM); 11876 PVMCPU pVCpu = pVCpu; 11880 11877 char szRegs[4096]; 11881 11878 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs), … … 11907 11904 11908 11905 char szInstr1[256]; 11909 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, p IemCpu->uOldCs, pIemCpu->uOldRip,11906 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip, 11910 11907 DBGF_DISAS_FLAGS_DEFAULT_MODE, 11911 11908 szInstr1, sizeof(szInstr1), NULL); … … 11975 11972 * a record dump attached. 11976 11973 * 11977 * @param p IemCpu The IEM per CPU data.11974 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11978 11975 * @param pEvtRec1 The first record. 11979 11976 * @param pEvtRec2 The second record. 11980 11977 * @param pszMsg The message explaining why we're asserting. 11981 11978 */ 11982 IEM_STATIC void iemVerifyAssertRecords(P IEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)11979 IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg) 11983 11980 { 11984 11981 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__); 11985 11982 iemVerifyAssertAddRecordDump(pEvtRec1); 11986 11983 iemVerifyAssertAddRecordDump(pEvtRec2); 11987 iemVerifyAssertMsg2(p IemCpu);11984 iemVerifyAssertMsg2(pVCpu); 11988 11985 RTAssertPanic(); 11989 11986 } … … 11994 11991 * a record dump attached. 11995 11992 * 11996 * @param p IemCpu The IEM per CPU data.11993 * @param pVCpu The cross context virtual CPU structure of the calling thread. 11997 11994 * @param pEvtRec1 The first record. 11998 11995 * @param pszMsg The message explaining why we're asserting. 
11999 11996 */ 12000 IEM_STATIC void iemVerifyAssertRecord(P IEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)11997 IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg) 12001 11998 { 12002 11999 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__); 12003 12000 iemVerifyAssertAddRecordDump(pEvtRec); 12004 iemVerifyAssertMsg2(p IemCpu);12001 iemVerifyAssertMsg2(pVCpu); 12005 12002 RTAssertPanic(); 12006 12003 } … … 12010 12007 * Verifies a write record. 12011 12008 * 12012 * @param p IemCpu The IEM per CPU data.12009 * @param pVCpu The cross context virtual CPU structure of the calling thread. 12013 12010 * @param pEvtRec The write record. 12014 12011 * @param fRem Set if REM was doing the other executing. If clear 12015 12012 * it was HM. 12016 12013 */ 12017 IEM_STATIC void iemVerifyWriteRecord(P IEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)12014 IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem) 12018 12015 { 12019 12016 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf); 12020 12017 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb); 12021 int rc = PGMPhysSimpleReadGCPhys( IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);12018 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb); 12022 12019 if ( RT_FAILURE(rc) 12023 12020 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) ) 12024 12021 { 12025 12022 /* fend off ins */ 12026 if ( !p IemCpu->cIOReads12023 if ( !pVCpu->iem.s.cIOReads 12027 12024 || pEvtRec->u.RamWrite.ab[0] != 0xcc 12028 12025 || ( pEvtRec->u.RamWrite.cb != 1 … … 12037 12034 if (pEvtRec->u.RamWrite.cb != 512) 12038 12035 { 12039 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled( IEMCPU_TO_VM(pIemCpu)->pUVM) ? "vmx" : "svm";12036 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm"; 12040 12037 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__); 12041 12038 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys); … … 12045 12042 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab); 12046 12043 iemVerifyAssertAddRecordDump(pEvtRec); 12047 iemVerifyAssertMsg2(p IemCpu);12044 iemVerifyAssertMsg2(pVCpu); 12048 12045 RTAssertPanic(); 12049 12046 } … … 12057 12054 * Performs the post-execution verfication checks. 12058 12055 */ 12059 IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(P IEMCPU pIemCpu, VBOXSTRICTRC rcStrictIem)12060 { 12061 if (!IEM_VERIFICATION_ENABLED(p IemCpu))12056 IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem) 12057 { 12058 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 12062 12059 return rcStrictIem; 12063 12060 … … 12065 12062 * Switch back the state. 
12066 12063 */ 12067 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr( IEMCPU_TO_VMCPU(pIemCpu));12068 PCPUMCTX pDebugCtx = p IemCpu->CTX_SUFF(pCtx);12064 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu); 12065 PCPUMCTX pDebugCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12069 12066 Assert(pOrgCtx != pDebugCtx); 12070 p IemCpu->CTX_SUFF(pCtx) = pOrgCtx;12067 pVCpu->iem.s.CTX_SUFF(pCtx) = pOrgCtx; 12071 12068 12072 12069 /* … … 12074 12071 */ 12075 12072 bool fRem = false; 12076 PVM pVM = IEMCPU_TO_VM(pIemCpu);12077 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);12073 PVM pVM = pVCpu->CTX_SUFF(pVM); 12074 PVMCPU pVCpu = pVCpu; 12078 12075 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST; 12079 12076 #ifdef IEM_VERIFICATION_MODE_FULL_HM 12080 12077 if ( HMIsEnabled(pVM) 12081 && p IemCpu->cIOReads == 012082 && p IemCpu->cIOWrites == 012083 && !p IemCpu->fProblematicMemory)12078 && pVCpu->iem.s.cIOReads == 0 12079 && pVCpu->iem.s.cIOWrites == 0 12080 && !pVCpu->iem.s.fProblematicMemory) 12084 12081 { 12085 12082 uint64_t uStartRip = pOrgCtx->rip; … … 12094 12091 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip) 12095 12092 || ( pOrgCtx->rip != pDebugCtx->rip 12096 && p IemCpu->uInjectCpl != UINT8_MAX12093 && pVCpu->iem.s.uInjectCpl != UINT8_MAX 12097 12094 && iLoops < 8) ); 12098 12095 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip) … … 12121 12118 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED) 12122 12119 { 12123 p IemCpu->CTX_SUFF(pCtx) = pOrgCtx;12120 pVCpu->iem.s.CTX_SUFF(pCtx) = pOrgCtx; 12124 12121 if (rc == VINF_EM_DBG_STEPPED) 12125 12122 return VINF_SUCCESS; … … 12252 12249 } 12253 12250 CHECK_FIELD(rip); 12254 uint32_t fFlagsMask = UINT32_MAX & ~p IemCpu->fUndefinedEFlags;12251 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags; 12255 12252 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask)) 12256 12253 { … … 12280 12277 } 12281 12278 12282 if (p IemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)12279 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx) 12283 12280 CHECK_FIELD(rax); 12284 12281 CHECK_FIELD(rcx); 12285 if (!p IemCpu->fIgnoreRaxRdx)12282 if (!pVCpu->iem.s.fIgnoreRaxRdx) 12286 12283 CHECK_FIELD(rdx); 12287 12284 CHECK_FIELD(rbx); … … 12310 12307 if (pOrgCtx->cr2 != pDebugCtx->cr2) 12311 12308 { 12312 if (p IemCpu->uOldCs == 0x1b && pIemCpu->uOldRip == 0x77f61ff3 && fRem)12309 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem) 12313 12310 { /* ignore */ } 12314 12311 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3) … … 12367 12364 * records. 12368 12365 */ 12369 if (cDiffs == 0 && !p IemCpu->fOverlappingMovs)12366 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs) 12370 12367 { 12371 12368 /* … … 12373 12370 * - I/O port accesses should be a 1:1 match. 
12374 12371 */ 12375 PIEMVERIFYEVTREC pIemRec = p IemCpu->pIemEvtRecHead;12376 PIEMVERIFYEVTREC pOtherRec = p IemCpu->pOtherEvtRecHead;12372 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead; 12373 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead; 12377 12374 while (pIemRec && pOtherRec) 12378 12375 { … … 12384 12381 { 12385 12382 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE) 12386 iemVerifyWriteRecord(p IemCpu, pIemRec, fRem);12383 iemVerifyWriteRecord(pVCpu, pIemRec, fRem); 12387 12384 pIemRec = pIemRec->pNext; 12388 12385 } … … 12391 12388 if (pIemRec->enmEvent != pOtherRec->enmEvent) 12392 12389 { 12393 iemVerifyAssertRecords(p IemCpu, pIemRec, pOtherRec, "Type mismatches");12390 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches"); 12394 12391 break; 12395 12392 } … … 12431 12428 if (!fEquals) 12432 12429 { 12433 iemVerifyAssertRecords(p IemCpu, pIemRec, pOtherRec, "Mismatch");12430 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch"); 12434 12431 break; 12435 12432 } … … 12444 12441 { 12445 12442 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE) 12446 iemVerifyWriteRecord(p IemCpu, pIemRec, fRem);12443 iemVerifyWriteRecord(pVCpu, pIemRec, fRem); 12447 12444 pIemRec = pIemRec->pNext; 12448 12445 } 12449 12446 if (pIemRec != NULL) 12450 iemVerifyAssertRecord(p IemCpu, pIemRec, "Extra IEM record!");12447 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!"); 12451 12448 else if (pOtherRec != NULL) 12452 iemVerifyAssertRecord(p IemCpu, pOtherRec, "Extra Other record!");12453 } 12454 p IemCpu->CTX_SUFF(pCtx) = pOrgCtx;12449 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!"); 12450 } 12451 pVCpu->iem.s.CTX_SUFF(pCtx) = pOrgCtx; 12455 12452 12456 12453 return rcStrictIem; … … 12460 12457 12461 12458 /* stubs */ 12462 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)12463 { 12464 NOREF(p IemCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);12459 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue) 12460 { 12461 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue); 12465 12462 return VERR_INTERNAL_ERROR; 12466 12463 } 12467 12464 12468 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(P IEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)12469 { 12470 NOREF(p IemCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);12465 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue) 12466 { 12467 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue); 12471 12468 return VERR_INTERNAL_ERROR; 12472 12469 } … … 12545 12542 * 12546 12543 * @returns Strict VBox status code to pass up. 12547 * @param p IemCpu The IEM per CPU data.12544 * @param pVCpu The cross context virtual CPU structure of the calling thread. 12548 12545 * @param rcStrict The status from executing an instruction. 
12549 12546 */ 12550 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(P IEMCPU pIemCpu, VBOXSTRICTRC rcStrict)12547 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict) 12551 12548 { 12552 12549 if (rcStrict != VINF_SUCCESS) … … 12577 12574 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12578 12575 /** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */ 12579 int32_t const rcPassUp = p IemCpu->rcPassUp;12576 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp; 12580 12577 if (rcPassUp == VINF_SUCCESS) 12581 p IemCpu->cRetInfStatuses++;12578 pVCpu->iem.s.cRetInfStatuses++; 12582 12579 else if ( rcPassUp < VINF_EM_FIRST 12583 12580 || rcPassUp > VINF_EM_LAST … … 12585 12582 { 12586 12583 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict))); 12587 p IemCpu->cRetPassUpStatus++;12584 pVCpu->iem.s.cRetPassUpStatus++; 12588 12585 rcStrict = rcPassUp; 12589 12586 } … … 12591 12588 { 12592 12589 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict))); 12593 p IemCpu->cRetInfStatuses++;12590 pVCpu->iem.s.cRetInfStatuses++; 12594 12591 } 12595 12592 } 12596 12593 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED) 12597 p IemCpu->cRetAspectNotImplemented++;12594 pVCpu->iem.s.cRetAspectNotImplemented++; 12598 12595 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED) 12599 p IemCpu->cRetInstrNotImplemented++;12596 pVCpu->iem.s.cRetInstrNotImplemented++; 12600 12597 #ifdef IEM_VERIFICATION_MODE_FULL 12601 12598 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION) … … 12603 12600 #endif 12604 12601 else 12605 p IemCpu->cRetErrStatuses++;12606 } 12607 else if (p IemCpu->rcPassUp != VINF_SUCCESS)12608 { 12609 p IemCpu->cRetPassUpStatus++;12610 rcStrict = p IemCpu->rcPassUp;12602 pVCpu->iem.s.cRetErrStatuses++; 12603 } 12604 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS) 12605 { 12606 pVCpu->iem.s.cRetPassUpStatus++; 12607 rcStrict = pVCpu->iem.s.rcPassUp; 12611 12608 } 12612 12609 … … 12623 12620 * @return Strict VBox status code. 12624 12621 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 12625 * @param p IemCpu The IEM per CPU data.12622 * @param pVCpu The cross context virtual CPU structure of the calling thread. 12626 12623 * @param fExecuteInhibit If set, execute the instruction following CLI, 12627 12624 * POP SS and MOV SS,GR. 
12628 12625 */ 12629 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu,bool fExecuteInhibit)12626 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit) 12630 12627 { 12631 12628 #ifdef IEM_WITH_SETJMP 12632 12629 VBOXSTRICTRC rcStrict; 12633 12630 jmp_buf JmpBuf; 12634 jmp_buf *pSavedJmpBuf = p IemCpu->CTX_SUFF(pJmpBuf);12635 p IemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;12631 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); 12632 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; 12636 12633 if ((rcStrict = setjmp(JmpBuf)) == 0) 12637 12634 { … … 12640 12637 } 12641 12638 else 12642 p IemCpu->cLongJumps++;12643 p IemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;12639 pVCpu->iem.s.cLongJumps++; 12640 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf; 12644 12641 #else 12645 12642 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 12647 12644 #endif 12648 12645 if (rcStrict == VINF_SUCCESS) 12649 p IemCpu->cInstructions++;12650 if (p IemCpu->cActiveMappings > 0)12646 pVCpu->iem.s.cInstructions++; 12647 if (pVCpu->iem.s.cActiveMappings > 0) 12651 12648 { 12652 12649 Assert(rcStrict != VINF_SUCCESS); 12653 iemMemRollback(p IemCpu);12650 iemMemRollback(pVCpu); 12654 12651 } 12655 12652 //#ifdef DEBUG 12656 // AssertMsg(p IemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));12653 // AssertMsg(pVCpu->iem.s.offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pVCpu->iem.s.offOpcode, cbInstr)); 12657 12654 //#endif 12658 12655 … … 12662 12659 && rcStrict == VINF_SUCCESS 12663 12660 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 12664 && EMGetInhibitInterruptsPC(pVCpu) == p IemCpu->CTX_SUFF(pCtx)->rip )12665 { 12666 rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, pIemCpu->fBypassHandlers);12661 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->iem.s.CTX_SUFF(pCtx)->rip ) 12662 { 12663 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers); 12667 12664 if (rcStrict == VINF_SUCCESS) 12668 12665 { 12669 12666 #ifdef LOG_ENABLED 12670 iemLogCurInstr( IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx), false);12667 iemLogCurInstr(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), false); 12671 12668 #endif 12672 12669 #ifdef IEM_WITH_SETJMP 12673 p IemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;12670 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; 12674 12671 if ((rcStrict = setjmp(JmpBuf)) == 0) 12675 12672 { … … 12678 12675 } 12679 12676 else 12680 p IemCpu->cLongJumps++;12681 p IemCpu->CTX_SUFF(pJmpBuf) = pSavedJmpBuf;12677 pVCpu->iem.s.cLongJumps++; 12678 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf; 12682 12679 #else 12683 12680 IEM_OPCODE_GET_NEXT_U8(&b); … … 12685 12682 #endif 12686 12683 if (rcStrict == VINF_SUCCESS) 12687 p IemCpu->cInstructions++;12688 if (p IemCpu->cActiveMappings > 0)12684 pVCpu->iem.s.cInstructions++; 12685 if (pVCpu->iem.s.cActiveMappings > 0) 12689 12686 { 12690 12687 Assert(rcStrict != VINF_SUCCESS); 12691 iemMemRollback(p IemCpu);12688 iemMemRollback(pVCpu); 12692 12689 } 12693 12690 } … … 12698 12695 * Return value fiddling, statistics and sanity assertions. 
12699 12696 */ 12700 rcStrict = iemExecStatusCodeFiddling(p IemCpu, rcStrict);12701 12702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->cs));12703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->ss));12697 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict); 12698 12699 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->cs)); 12700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->ss)); 12704 12701 #if defined(IEM_VERIFICATION_MODE_FULL) 12705 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->es));12706 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->ds));12707 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->fs));12708 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->gs));12702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->es)); 12703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->ds)); 12704 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->fs)); 12705 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->gs)); 12709 12706 #endif 12710 12707 return rcStrict; … … 12717 12714 * 12718 12715 * @returns rcStrict, maybe modified. 12719 * @param pIemCpu The IEM CPU structure. 12720 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 12716 * @param pVCpu The cross context virtual CPU structure of the calling thread. 12721 12717 * @param pCtx The current CPU context. 12722 12718 * @param rcStrict The status code returne by the interpreter. 12723 12719 */ 12724 DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(P IEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)12725 { 12726 if ( !p IemCpu->fInPatchCode12720 DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict) 12721 { 12722 if ( !pVCpu->iem.s.fInPatchCode 12727 12723 && ( rcStrict == VINF_SUCCESS 12728 12724 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */ … … 12750 12746 VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu) 12751 12747 { 12752 PIEMCPU pIemCpu = &pVCpu->iem.s;12753 12754 12748 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) 12755 if (++p IemCpu->cVerifyDepth == 1)12756 iemExecVerificationModeSetup(p IemCpu);12749 if (++pVCpu->iem.s.cVerifyDepth == 1) 12750 iemExecVerificationModeSetup(pVCpu); 12757 12751 #endif 12758 12752 #ifdef LOG_ENABLED 12759 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);12753 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12760 12754 iemLogCurInstr(pVCpu, pCtx, true); 12761 12755 #endif … … 12764 12758 * Do the decoding and emulation. 12765 12759 */ 12766 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, false);12760 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 12767 12761 if (rcStrict == VINF_SUCCESS) 12768 rcStrict = iemExecOneInner(pVCpu, pIemCpu,true);12762 rcStrict = iemExecOneInner(pVCpu, true); 12769 12763 12770 12764 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) … … 12772 12766 * Assert some sanity. 
12773 12767 */ 12774 if (p IemCpu->cVerifyDepth == 1)12775 rcStrict = iemExecVerificationModeCheck(p IemCpu, rcStrict);12776 p IemCpu->cVerifyDepth--;12768 if (pVCpu->iem.s.cVerifyDepth == 1) 12769 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict); 12770 pVCpu->iem.s.cVerifyDepth--; 12777 12771 #endif 12778 12772 #ifdef IN_RC 12779 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);12773 rcStrict = iemRCRawMaybeReenter(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), rcStrict); 12780 12774 #endif 12781 12775 if (rcStrict != VINF_SUCCESS) … … 12788 12782 VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten) 12789 12783 { 12790 PIEMCPU pIemCpu = &pVCpu->iem.s; 12791 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12784 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12792 12785 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); 12793 12786 12794 uint32_t const cbOldWritten = p IemCpu->cbWritten;12795 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, false);12787 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten; 12788 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 12796 12789 if (rcStrict == VINF_SUCCESS) 12797 12790 { 12798 rcStrict = iemExecOneInner(pVCpu, pIemCpu,true);12791 rcStrict = iemExecOneInner(pVCpu, true); 12799 12792 if (pcbWritten) 12800 *pcbWritten = p IemCpu->cbWritten - cbOldWritten;12793 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten; 12801 12794 } 12802 12795 12803 12796 #ifdef IN_RC 12804 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pCtx, rcStrict);12797 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict); 12805 12798 #endif 12806 12799 return rcStrict; … … 12811 12804 const void *pvOpcodeBytes, size_t cbOpcodeBytes) 12812 12805 { 12813 PIEMCPU pIemCpu = &pVCpu->iem.s; 12814 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12806 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12815 12807 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); 12816 12808 … … 12819 12811 && pCtx->rip == OpcodeBytesPC) 12820 12812 { 12821 iemInitDecoder(p IemCpu, false);12822 p IemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));12823 memcpy(p IemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);12813 iemInitDecoder(pVCpu, false); 12814 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 12815 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 12824 12816 rcStrict = VINF_SUCCESS; 12825 12817 } 12826 12818 else 12827 rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, false);12819 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 12828 12820 if (rcStrict == VINF_SUCCESS) 12829 12821 { 12830 rcStrict = iemExecOneInner(pVCpu, pIemCpu,true);12822 rcStrict = iemExecOneInner(pVCpu, true); 12831 12823 } 12832 12824 12833 12825 #ifdef IN_RC 12834 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pCtx, rcStrict);12826 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict); 12835 12827 #endif 12836 12828 return rcStrict; … … 12840 12832 VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten) 12841 12833 { 12842 PIEMCPU pIemCpu = &pVCpu->iem.s; 12843 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12834 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12844 12835 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); 12845 12836 12846 uint32_t const cbOldWritten = p IemCpu->cbWritten;12847 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(p 
IemCpu, true);12837 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten; 12838 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true); 12848 12839 if (rcStrict == VINF_SUCCESS) 12849 12840 { 12850 rcStrict = iemExecOneInner(pVCpu, pIemCpu,false);12841 rcStrict = iemExecOneInner(pVCpu, false); 12851 12842 if (pcbWritten) 12852 *pcbWritten = p IemCpu->cbWritten - cbOldWritten;12843 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten; 12853 12844 } 12854 12845 12855 12846 #ifdef IN_RC 12856 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pCtx, rcStrict);12847 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict); 12857 12848 #endif 12858 12849 return rcStrict; … … 12863 12854 const void *pvOpcodeBytes, size_t cbOpcodeBytes) 12864 12855 { 12865 PIEMCPU pIemCpu = &pVCpu->iem.s; 12866 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12856 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12867 12857 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); 12868 12858 … … 12871 12861 && pCtx->rip == OpcodeBytesPC) 12872 12862 { 12873 iemInitDecoder(p IemCpu, true);12874 p IemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));12875 memcpy(p IemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);12863 iemInitDecoder(pVCpu, true); 12864 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 12865 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 12876 12866 rcStrict = VINF_SUCCESS; 12877 12867 } 12878 12868 else 12879 rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, true);12869 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true); 12880 12870 if (rcStrict == VINF_SUCCESS) 12881 rcStrict = iemExecOneInner(pVCpu, pIemCpu,false);12871 rcStrict = iemExecOneInner(pVCpu, false); 12882 12872 12883 12873 #ifdef IN_RC 12884 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pCtx, rcStrict);12874 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict); 12885 12875 #endif 12886 12876 return rcStrict; … … 12905 12895 uint32_t *pcbWritten) 12906 12896 { 12907 PIEMCPU pIemCpu = &pVCpu->iem.s;12908 12897 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12909 12898 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3); 12910 12899 12911 uint32_t const cbOldWritten = p IemCpu->cbWritten;12900 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten; 12912 12901 VBOXSTRICTRC rcStrict; 12913 12902 if ( cbOpcodeBytes 12914 12903 && pCtx->rip == OpcodeBytesPC) 12915 12904 { 12916 iemInitDecoder(p IemCpu, true);12917 p IemCpu->cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pIemCpu->abOpcode));12918 memcpy(p IemCpu->abOpcode, pvOpcodeBytes, pIemCpu->cbOpcode);12905 iemInitDecoder(pVCpu, true); 12906 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode)); 12907 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode); 12919 12908 rcStrict = VINF_SUCCESS; 12920 12909 } 12921 12910 else 12922 rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, true);12911 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true); 12923 12912 if (rcStrict == VINF_SUCCESS) 12924 12913 { 12925 rcStrict = iemExecOneInner(pVCpu, pIemCpu,false);12914 rcStrict = iemExecOneInner(pVCpu, false); 12926 12915 if (pcbWritten) 12927 *pcbWritten = p IemCpu->cbWritten - cbOldWritten;12916 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten; 12928 12917 } 12929 12918 12930 12919 #ifdef IN_RC 12931 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pCtx, rcStrict);12920 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict); 
12932 12921 #endif 12933 12922 return rcStrict; … … 12937 12926 VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions) 12938 12927 { 12939 PIEMCPU pIemCpu = &pVCpu->iem.s; 12940 uint32_t const cInstructionsAtStart = pIemCpu->cInstructions; 12928 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions; 12941 12929 12942 12930 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) … … 12944 12932 * See if there is an interrupt pending in TRPM, inject it if we can. 12945 12933 */ 12946 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);12934 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12947 12935 # ifdef IEM_VERIFICATION_MODE_FULL 12948 p IemCpu->uInjectCpl = UINT8_MAX;12936 pVCpu->iem.s.uInjectCpl = UINT8_MAX; 12949 12937 # endif 12950 12938 if ( pCtx->eflags.Bits.u1IF … … 12958 12946 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 12959 12947 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 12960 if (!IEM_VERIFICATION_ENABLED(p IemCpu))12948 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 12961 12949 TRPMResetTrap(pVCpu); 12962 12950 } … … 12972 12960 * Do the decoding and emulation. 12973 12961 */ 12974 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, false);12962 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 12975 12963 if (rcStrict == VINF_SUCCESS) 12976 rcStrict = iemExecOneInner(pVCpu, pIemCpu,true);12964 rcStrict = iemExecOneInner(pVCpu, true); 12977 12965 12978 12966 /* 12979 12967 * Assert some sanity. 12980 12968 */ 12981 rcStrict = iemExecVerificationModeCheck(p IemCpu, rcStrict);12969 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict); 12982 12970 12983 12971 /* … … 12988 12976 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 12989 12977 if (pcInstructions) 12990 *pcInstructions = p IemCpu->cInstructions - cInstructionsAtStart;12978 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart; 12991 12979 return rcStrict; 12992 12980 … … 12996 12984 * See if there is an interrupt pending in TRPM, inject it if we can. 12997 12985 */ 12998 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);12986 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 12999 12987 # ifdef IEM_VERIFICATION_MODE_FULL 13000 p IemCpu->uInjectCpl = UINT8_MAX;12988 pVCpu->iem.s.uInjectCpl = UINT8_MAX; 13001 12989 # endif 13002 12990 if ( pCtx->eflags.Bits.u1IF … … 13010 12998 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 13011 12999 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 13012 if (!IEM_VERIFICATION_ENABLED(p IemCpu))13000 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 13013 13001 TRPMResetTrap(pVCpu); 13014 13002 } … … 13017 13005 * Initial decoder init w/ prefetch, then setup setjmp. 
13018 13006 */ 13019 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(p IemCpu, false);13007 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 13020 13008 if (rcStrict == VINF_SUCCESS) 13021 13009 { 13022 13010 # ifdef IEM_WITH_SETJMP 13023 13011 jmp_buf JmpBuf; 13024 jmp_buf *pSavedJmpBuf = p IemCpu->CTX_SUFF(pJmpBuf);13025 p IemCpu->CTX_SUFF(pJmpBuf) = &JmpBuf;13026 p IemCpu->cActiveMappings = 0;13012 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); 13013 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; 13014 pVCpu->iem.s.cActiveMappings = 0; 13027 13015 if ((rcStrict = setjmp(JmpBuf)) == 0) 13028 13016 # endif … … 13049 13037 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 13050 13038 { 13051 Assert(p IemCpu->cActiveMappings == 0);13052 p IemCpu->cInstructions++;13053 if (RT_LIKELY(p IemCpu->rcPassUp == VINF_SUCCESS))13039 Assert(pVCpu->iem.s.cActiveMappings == 0); 13040 pVCpu->iem.s.cInstructions++; 13041 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS)) 13054 13042 { 13055 13043 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_INHIBIT_INTERRUPTS) … … 13057 13045 && cInstr-- > 0 )) 13058 13046 { 13059 iemReInitDecoder(pVCpu , pIemCpu);13047 iemReInitDecoder(pVCpu); 13060 13048 continue; 13061 13049 } 13062 13050 } 13063 13051 } 13064 else if (p IemCpu->cActiveMappings > 0) /** @todo This should only happen when rcStrict != VINF_SUCCESS! */13065 iemMemRollback(p IemCpu);13066 rcStrict = iemExecStatusCodeFiddling(p IemCpu, rcStrict);13052 else if (pVCpu->iem.s.cActiveMappings > 0) /** @todo This should only happen when rcStrict != VINF_SUCCESS! */ 13053 iemMemRollback(pVCpu); 13054 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict); 13067 13055 break; 13068 13056 } … … 13071 13059 else 13072 13060 { 13073 if (p IemCpu->cActiveMappings > 0)13074 iemMemRollback(p IemCpu);13075 p IemCpu->cLongJumps++;13061 if (pVCpu->iem.s.cActiveMappings > 0) 13062 iemMemRollback(pVCpu); 13063 pVCpu->iem.s.cLongJumps++; 13076 13064 } 13077 13065 # endif … … 13080 13068 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder). 
13081 13069 */ 13082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->cs));13083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->ss));13070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->cs)); 13071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->ss)); 13084 13072 # if defined(IEM_VERIFICATION_MODE_FULL) 13085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->es));13086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->ds));13087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->fs));13088 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p IemCpu->CTX_SUFF(pCtx)->gs));13073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->es)); 13074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->ds)); 13075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->fs)); 13076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->iem.s.CTX_SUFF(pCtx)->gs)); 13089 13077 # endif 13090 13078 } … … 13094 13082 */ 13095 13083 # ifdef IN_RC 13096 rcStrict = iemRCRawMaybeReenter(p IemCpu, pVCpu, pIemCpu->CTX_SUFF(pCtx), rcStrict);13084 rcStrict = iemRCRawMaybeReenter(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), rcStrict); 13097 13085 # endif 13098 13086 if (rcStrict != VINF_SUCCESS) … … 13100 13088 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 13101 13089 if (pcInstructions) 13102 *pcInstructions = p IemCpu->cInstructions - cInstructionsAtStart;13090 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart; 13103 13091 return rcStrict; 13104 13092 #endif /* Not verification mode */ … … 13125 13113 uint8_t cbInstr) 13126 13114 { 13127 iemInitDecoder( &pVCpu->iem.s, false);13115 iemInitDecoder(pVCpu, false); 13128 13116 #ifdef DBGFTRACE_ENABLED 13129 13117 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx", … … 13171 13159 } 13172 13160 13173 return iemRaiseXcptOrInt( &pVCpu->iem.s, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);13161 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2); 13174 13162 } 13175 13163 … … 13233 13221 VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore) 13234 13222 { 13235 PIEMCPU pIemCpu = &pVCpu->iem.s; 13236 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 13223 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 13237 13224 13238 13225 iemCtxCoreToCtx(pCtx, pCtxCore); 13239 iemInitDecoder(p IemCpu);13240 VBOXSTRICTRC rcStrict = iemCImpl_iret(p IemCpu, 1, pIemCpu->enmDefOpSize);13226 iemInitDecoder(pVCpu); 13227 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize); 13241 13228 if (rcStrict == VINF_SUCCESS) 13242 13229 iemCtxToCtxCore(pCtxCore, pCtx); … … 13268 13255 * 13269 13256 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller. 13270 * @param p IemCpu The IEM per-CPU structure.13271 * @param rcStrict 13272 */ 13273 DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(P IEMCPU pIemCpu, VBOXSTRICTRC rcStrict)13274 { 13275 iemUninitExec(p IemCpu);13257 * @param pVCpu The cross context virtual CPU structure of the calling thread. 13258 * @param rcStrict The status code to fiddle. 
13259 */ 13260 DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict) 13261 { 13262 iemUninitExec(pVCpu); 13276 13263 #ifdef IN_RC 13277 return iemRCRawMaybeReenter(p IemCpu, IEMCPU_TO_VMCPU(pIemCpu), pIemCpu->CTX_SUFF(pCtx),13278 iemExecStatusCodeFiddling(p IemCpu, rcStrict));13264 return iemRCRawMaybeReenter(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), 13265 iemExecStatusCodeFiddling(pVCpu, rcStrict)); 13279 13266 #else 13280 return iemExecStatusCodeFiddling(p IemCpu, rcStrict);13267 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 13281 13268 #endif 13282 13269 } … … 13311 13298 * State init. 13312 13299 */ 13313 PIEMCPU pIemCpu = &pVCpu->iem.s; 13314 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13300 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13315 13301 13316 13302 /* … … 13325 13311 switch (cbValue) 13326 13312 { 13327 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13328 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13329 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13313 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13314 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13315 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13330 13316 default: 13331 13317 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13336 13322 switch (cbValue) 13337 13323 { 13338 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13339 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13340 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13324 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13325 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13326 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13341 13327 default: 13342 13328 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13347 13333 switch (cbValue) 13348 13334 { 13349 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13350 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13351 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13335 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13336 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13337 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13352 13338 default: 13353 13339 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13366 13352 switch (cbValue) 13367 13353 { 13368 case 1: rcStrict = iemCImpl_outs_op8_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13369 case 2: rcStrict = iemCImpl_outs_op16_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13370 case 4: rcStrict = iemCImpl_outs_op32_addr16(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13354 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, 
fIoChecked); break; 13355 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13356 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13371 13357 default: 13372 13358 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13377 13363 switch (cbValue) 13378 13364 { 13379 case 1: rcStrict = iemCImpl_outs_op8_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13380 case 2: rcStrict = iemCImpl_outs_op16_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13381 case 4: rcStrict = iemCImpl_outs_op32_addr32(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13365 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13366 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13367 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13382 13368 default: 13383 13369 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13388 13374 switch (cbValue) 13389 13375 { 13390 case 1: rcStrict = iemCImpl_outs_op8_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13391 case 2: rcStrict = iemCImpl_outs_op16_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13392 case 4: rcStrict = iemCImpl_outs_op32_addr64(p IemCpu, cbInstr, iEffSeg, fIoChecked); break;13376 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13377 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13378 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break; 13393 13379 default: 13394 13380 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13401 13387 } 13402 13388 13403 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13389 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13404 13390 } 13405 13391 … … 13431 13417 * State init. 
13432 13418 */ 13433 PIEMCPU pIemCpu = &pVCpu->iem.s; 13434 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13419 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13435 13420 13436 13421 /* … … 13445 13430 switch (cbValue) 13446 13431 { 13447 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(p IemCpu, cbInstr, fIoChecked); break;13448 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(p IemCpu, cbInstr, fIoChecked); break;13449 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(p IemCpu, cbInstr, fIoChecked); break;13432 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break; 13433 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break; 13434 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break; 13450 13435 default: 13451 13436 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13456 13441 switch (cbValue) 13457 13442 { 13458 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(p IemCpu, cbInstr, fIoChecked); break;13459 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(p IemCpu, cbInstr, fIoChecked); break;13460 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(p IemCpu, cbInstr, fIoChecked); break;13443 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break; 13444 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break; 13445 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break; 13461 13446 default: 13462 13447 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13467 13452 switch (cbValue) 13468 13453 { 13469 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(p IemCpu, cbInstr, fIoChecked); break;13470 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(p IemCpu, cbInstr, fIoChecked); break;13471 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(p IemCpu, cbInstr, fIoChecked); break;13454 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break; 13455 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break; 13456 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break; 13472 13457 default: 13473 13458 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13486 13471 switch (cbValue) 13487 13472 { 13488 case 1: rcStrict = iemCImpl_ins_op8_addr16(p IemCpu, cbInstr, fIoChecked); break;13489 case 2: rcStrict = iemCImpl_ins_op16_addr16(p IemCpu, cbInstr, fIoChecked); break;13490 case 4: rcStrict = iemCImpl_ins_op32_addr16(p IemCpu, cbInstr, fIoChecked); break;13473 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break; 13474 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break; 13475 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break; 13491 13476 default: 13492 13477 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13497 13482 switch (cbValue) 13498 13483 { 13499 case 1: rcStrict = iemCImpl_ins_op8_addr32(p IemCpu, cbInstr, fIoChecked); break;13500 case 2: rcStrict = iemCImpl_ins_op16_addr32(p IemCpu, cbInstr, fIoChecked); break;13501 case 4: rcStrict = iemCImpl_ins_op32_addr32(p IemCpu, cbInstr, fIoChecked); break;13484 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break; 13485 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break; 13486 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); 
break; 13502 13487 default: 13503 13488 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13508 13493 switch (cbValue) 13509 13494 { 13510 case 1: rcStrict = iemCImpl_ins_op8_addr64(p IemCpu, cbInstr, fIoChecked); break;13511 case 2: rcStrict = iemCImpl_ins_op16_addr64(p IemCpu, cbInstr, fIoChecked); break;13512 case 4: rcStrict = iemCImpl_ins_op32_addr64(p IemCpu, cbInstr, fIoChecked); break;13495 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break; 13496 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break; 13497 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break; 13513 13498 default: 13514 13499 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE); … … 13521 13506 } 13522 13507 13523 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13508 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13524 13509 } 13525 13510 … … 13541 13526 Assert(cbReg <= 4 && cbReg != 3); 13542 13527 13543 PIEMCPU pIemCpu = &pVCpu->iem.s; 13544 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13528 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13545 13529 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg); 13546 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13547 13531 } 13548 13532 … … 13562 13546 Assert(cbReg <= 4 && cbReg != 3); 13563 13547 13564 PIEMCPU pIemCpu = &pVCpu->iem.s; 13565 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13548 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13566 13549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg); 13567 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13550 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13568 13551 } 13569 13552 … … 13586 13569 Assert(iGReg < 16); 13587 13570 13588 PIEMCPU pIemCpu = &pVCpu->iem.s; 13589 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13571 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13590 13572 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg); 13591 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13573 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13592 13574 } 13593 13575 … … 13610 13592 Assert(iGReg < 16); 13611 13593 13612 PIEMCPU pIemCpu = &pVCpu->iem.s; 13613 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13594 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13614 13595 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg); 13615 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13616 13597 } 13617 13598 … … 13630 13611 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2); 13631 13612 13632 PIEMCPU pIemCpu = &pVCpu->iem.s; 13633 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13613 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13634 13614 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts); 13635 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13615 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13636 13616 } 13637 13617 … … 13651 13631 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3); 13652 13632 13653 PIEMCPU pIemCpu = &pVCpu->iem.s; 13654 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13633 iemInitExec(pVCpu, false 
/*fBypassHandlers*/); 13655 13634 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue); 13656 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13635 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13657 13636 } 13658 13637 … … 13673 13652 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3); 13674 13653 13675 PIEMCPU pIemCpu = &pVCpu->iem.s; 13676 iemInitExec(pIemCpu, false /*fBypassHandlers*/); 13654 iemInitExec(pVCpu, false /*fBypassHandlers*/); 13677 13655 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv); 13678 return iemUninitExecAndFiddleStatusAndMaybeReenter(p IemCpu, rcStrict);13656 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 13679 13657 } 13680 13658 … … 13689 13667 * with @a rcStrict. 13690 13668 * @param iMemMap The memory mapping index. For error reporting only. 13691 * @param p IemCpu The IEMCPU structure of the calling EMT, for error13692 * reporting only.13669 * @param pVCpu The cross context virtual CPU structure of the calling 13670 * thread, for error reporting only. 13693 13671 */ 13694 13672 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, 13695 unsigned iMemMap, P IEMCPU pIemCpu)13673 unsigned iMemMap, PVMCPU pVCpu) 13696 13674 { 13697 13675 if (RT_FAILURE_NP(rcStrict)) … … 13706 13684 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n", 13707 13685 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap, 13708 p IemCpu->aMemMappings[iMemMap].fAccess,13709 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, pIemCpu->aMemBbMappings[iMemMap].cbFirst,13710 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, pIemCpu->aMemBbMappings[iMemMap].cbSecond));13686 pVCpu->iem.s.aMemMappings[iMemMap].fAccess, 13687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 13688 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)); 13711 13689 return VERR_IOM_FF_STATUS_IPE; 13712 13690 } … … 13721 13699 * with @a rcStrict. 13722 13700 * @param iMemMap The memory mapping index. For error reporting only. 13723 * @param p IemCpu The IEMCPU structure of the calling EMT, for error13724 * reporting only.13725 */ 13726 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, P IEMCPU pIemCpu)13701 * @param pVCpu The cross context virtual CPU structure of the calling 13702 * thread, for error reporting only. 13703 */ 13704 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu) 13727 13705 { 13728 13706 /* Simple. */ … … 13743 13721 13744 13722 /* Unlikely */ 13745 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, p IemCpu);13723 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu); 13746 13724 } 13747 13725 … … 13757 13735 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict) 13758 13736 { 13759 PIEMCPU pIemCpu = &pVCpu->iem.s;13760 13761 13737 /* 13762 13738 * Reset the pending commit. 
13763 13739 */ 13764 AssertMsg( (p IemCpu->aMemMappings[0].fAccess | pIemCpu->aMemMappings[1].fAccess | pIemCpu->aMemMappings[2].fAccess)13740 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess) 13765 13741 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND), 13766 13742 ("%#x %#x %#x\n", 13767 p IemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));13743 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess)); 13768 13744 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM); 13769 13745 … … 13772 13748 */ 13773 13749 unsigned cBufs = 0; 13774 unsigned iMemMap = RT_ELEMENTS(p IemCpu->aMemMappings);13750 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings); 13775 13751 while (iMemMap-- > 0) 13776 if (p IemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))13752 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND)) 13777 13753 { 13778 Assert(p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);13779 Assert(p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);13780 Assert(!p IemCpu->aMemBbMappings[iMemMap].fUnassigned);13781 13782 uint16_t const cbFirst = p IemCpu->aMemBbMappings[iMemMap].cbFirst;13783 uint16_t const cbSecond = p IemCpu->aMemBbMappings[iMemMap].cbSecond;13784 uint8_t const *pbBuf = &p IemCpu->aBounceBuffers[iMemMap].ab[0];13785 13786 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)13754 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE); 13755 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED); 13756 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned); 13757 13758 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst; 13759 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond; 13760 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]; 13761 13762 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST) 13787 13763 { 13788 13764 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM, 13789 p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst,13765 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, 13790 13766 pbBuf, 13791 13767 cbFirst, 13792 13768 PGMACCESSORIGIN_IEM); 13793 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, p IemCpu);13769 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu); 13794 13770 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n", 13795 iMemMap, p IemCpu->aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,13771 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, 13796 13772 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict))); 13797 13773 } 13798 13774 13799 if (p IemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)13775 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND) 13800 13776 { 13801 13777 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM, 13802 p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond,13778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, 13803 13779 pbBuf + cbFirst, 13804 13780 cbSecond, 13805 13781 PGMACCESSORIGIN_IEM); 13806 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, p IemCpu);13782 rcStrict = 
iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu); 13807 13783 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n", 13808 iMemMap, p IemCpu->aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,13784 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, 13809 13785 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict))); 13810 13786 } 13811 13787 cBufs++; 13812 p IemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;13788 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID; 13813 13789 } 13814 13790 13815 AssertMsg(cBufs > 0 && cBufs == p IemCpu->cActiveMappings,13816 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, p IemCpu->cActiveMappings,13817 p IemCpu->aMemMappings[0].fAccess, pIemCpu->aMemMappings[1].fAccess, pIemCpu->aMemMappings[2].fAccess));13818 p IemCpu->cActiveMappings = 0;13791 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings, 13792 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings, 13793 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess)); 13794 pVCpu->iem.s.cActiveMappings = 0; 13819 13795 return rcStrict; 13820 13796 } -
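The hunks above end with IEMR3ProcessForceFlag, which drains writes that had to be postponed to ring-3: each still-active mapping slot holds a bounce buffer whose contents may straddle a page boundary, so the commit is issued as up to two PGMPhysWrite calls (the first-page chunk, then the spill-over into the second page) and each result is merged into the status code being returned before the slot is marked invalid. The following is a minimal sketch of that split-commit shape only; BounceMapping, physWrite and commitPending are illustrative names, not the real IEM types, which live in pVCpu->iem.s.

/*
 * Sketch of the split commit performed by IEMR3ProcessForceFlag above.
 * All names here are illustrative; the real code uses PGMPhysWrite and
 * merges the two status codes with iemR3MergeStatus.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct BounceMapping
{
    uint64_t GCPhysFirst;    /* guest-physical address of the first chunk */
    uint64_t GCPhysSecond;   /* guest-physical address of the page-crossing tail */
    uint16_t cbFirst;        /* bytes to commit to the first page */
    uint16_t cbSecond;       /* bytes to commit to the second page, 0 if none */
    uint8_t  ab[64];         /* bounce buffer holding the data to commit */
    int      fPendingWrite;  /* set while a ring-3 commit is still outstanding */
} BounceMapping;

/* Stand-in for the real guest-physical write; always succeeds here. */
static int physWrite(uint64_t GCPhys, const uint8_t *pb, size_t cb)
{
    (void)pb;
    printf("write %u bytes at %#llx\n", (unsigned)cb, (unsigned long long)GCPhys);
    return 0;
}

/* Commit a pending bounce-buffered write: first chunk, then the tail. */
static int commitPending(BounceMapping *pMap)
{
    int rc = 0;
    if (pMap->fPendingWrite)
    {
        rc = physWrite(pMap->GCPhysFirst, &pMap->ab[0], pMap->cbFirst);
        if (rc == 0 && pMap->cbSecond)
            rc = physWrite(pMap->GCPhysSecond, &pMap->ab[pMap->cbFirst], pMap->cbSecond);
        pMap->fPendingWrite = 0;  /* the mapping slot is free again */
    }
    return rc;
}

int main(void)
{
    BounceMapping Map = { UINT64_C(0x1fff8), UINT64_C(0x20000), 8, 4, { 0 }, 1 };
    memset(Map.ab, 0xcc, 12);
    return commitPending(&Map);
}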
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
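The IEMAllCImpl.cpp.h hunks below apply the same mechanical conversion to the instruction-implementation helpers: every PIEMCPU pIemCpu parameter becomes PVMCPU pVCpu, and state that used to be reached as pIemCpu->X is now reached through the IEM data embedded in the virtual CPU as pVCpu->iem.s.X. A deliberately simplified sketch of that access-pattern change follows; VMCPUSKETCH and IEMCPUSKETCH are made-up stand-ins, since the real VMCPU/IEMCPU structures carry far more members, unions and padding.

/* Simplified illustration only: the point is the pVCpu->iem.s.X access path. */
#include <stdio.h>
#include <stdint.h>

typedef struct IEMCPUSKETCH
{
    uint32_t cInstructions;   /* instructions executed by IEM */
    uint32_t cIOReads;        /* I/O port reads recorded */
} IEMCPUSKETCH;

typedef struct VMCPUSKETCH
{
    struct
    {
        IEMCPUSKETCH s;       /* IEM keeps its per-CPU data nested here */
    } iem;
} VMCPUSKETCH;

/* Old shape: helpers took the nested IEM state directly. */
static void countInstrOld(IEMCPUSKETCH *pIemCpu)
{
    pIemCpu->cInstructions++;
}

/* New shape: helpers take the whole virtual CPU and go through iem.s. */
static void countInstrNew(VMCPUSKETCH *pVCpu)
{
    pVCpu->iem.s.cInstructions++;
}

int main(void)
{
    VMCPUSKETCH VCpu = { { { 0, 0 } } };
    countInstrOld(&VCpu.iem.s);
    countInstrNew(&VCpu);
    printf("cInstructions=%u\n", VCpu.iem.s.cInstructions);
    return 0;
}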
r61968 r62015 26 26 * @returns Strict VBox status code. 27 27 * 28 * @param p IemCpu The IEM per CPU data.28 * @param pVCpu The cross context virtual CPU structure of the calling thread. 29 29 * @param pCtx The register context. 30 30 * @param u16Port The port number. 31 31 * @param cbOperand The operand size. 32 32 */ 33 static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(P IEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)33 static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand) 34 34 { 35 35 /* The TSS bits we're interested in are the same on 386 and AMD64. */ … … 48 48 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n", 49 49 u16Port, cbOperand, pCtx->tr.Attr.n.u4Type, pCtx->tr.Attr.u)); 50 return iemRaiseGeneralProtectionFault0(p IemCpu);50 return iemRaiseGeneralProtectionFault0(pVCpu); 51 51 } 52 52 … … 55 55 */ 56 56 uint16_t offBitmap; 57 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(p IemCpu, &offBitmap, UINT8_MAX,57 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX, 58 58 pCtx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap)); 59 59 if (rcStrict != VINF_SUCCESS) … … 75 75 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n", 76 76 offFirstBit, pCtx->tr.u32Limit)); 77 return iemRaiseGeneralProtectionFault0(p IemCpu);77 return iemRaiseGeneralProtectionFault0(pVCpu); 78 78 } 79 79 … … 85 85 * 2nd byte when it's not required. */ 86 86 uint16_t bmBytes = UINT16_MAX; 87 rcStrict = iemMemFetchSysU16(p IemCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit);87 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pCtx->tr.u64Base + offFirstBit); 88 88 if (rcStrict != VINF_SUCCESS) 89 89 { … … 101 101 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n", 102 102 u16Port, cbOperand, bmBytes, fPortMask)); 103 return iemRaiseGeneralProtectionFault0(p IemCpu);103 return iemRaiseGeneralProtectionFault0(pVCpu); 104 104 } 105 105 … … 115 115 * @returns Strict VBox status code. 116 116 * 117 * @param p IemCpu The IEM per CPU data.117 * @param pVCpu The cross context virtual CPU structure of the calling thread. 118 118 * @param pCtx The register context. 119 119 * @param u16Port The port number. 120 120 * @param cbOperand The operand size. 121 121 */ 122 DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(P IEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)122 DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand) 123 123 { 124 124 X86EFLAGS Efl; 125 Efl.u = IEMMISC_GET_EFL(p IemCpu, pCtx);125 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx); 126 126 if ( (pCtx->cr0 & X86_CR0_PE) 127 && ( p IemCpu->uCpl > Efl.Bits.u2IOPL127 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL 128 128 || Efl.Bits.u1VM) ) 129 return iemHlpCheckPortIOPermissionBitmap(p IemCpu, pCtx, u16Port, cbOperand);129 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx, u16Port, cbOperand); 130 130 return VINF_SUCCESS; 131 131 } … … 169 169 * Updates the specified flags according to a 8-bit result. 170 170 * 171 * @param p IemCpu The IEM state of the calling EMT.171 * @param pVCpu The cross context virtual CPU structure of the calling thread. 172 172 * @param u8Result The result to set the flags according to. 173 173 * @param fToUpdate The flags to update. 
174 174 * @param fUndefined The flags that are specified as undefined. 175 175 */ 176 static void iemHlpUpdateArithEFlagsU8(P IEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)177 { 178 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);176 static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined) 177 { 178 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 179 179 180 180 uint32_t fEFlags = pCtx->eflags.u; … … 183 183 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags; 184 184 #ifdef IEM_VERIFICATION_MODE_FULL 185 p IemCpu->fUndefinedEFlags |= fUndefined;185 pVCpu->iem.s.fUndefinedEFlags |= fUndefined; 186 186 #endif 187 187 } … … 194 194 * @param pSReg Pointer to the segment register. 195 195 */ 196 static void iemHlpAdjustSelectorForNewCpl(P IEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)196 static void iemHlpAdjustSelectorForNewCpl(PVMCPU pVCpu, uint8_t uCpl, PCPUMSELREG pSReg) 197 197 { 198 198 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 199 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg))200 CPUMGuestLazyLoadHiddenSelectorReg( IEMCPU_TO_VMCPU(pIemCpu), pSReg);199 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)) 200 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg); 201 201 #else 202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pSReg));202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 203 203 #endif 204 204 … … 207 207 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) 208 208 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */ 209 iemHlpLoadNullDataSelectorProt(p IemCpu, pSReg, 0);209 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0); 210 210 } 211 211 … … 214 214 * Indicates that we have modified the FPU state. 215 215 * 216 * @param p IemCpu The IEM state of the calling EMT.217 */ 218 DECLINLINE(void) iemHlpUsedFpu(P IEMCPU pIemCpu)219 { 220 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);216 * @param pVCpu The cross context virtual CPU structure of the calling thread. 217 */ 218 DECLINLINE(void) iemHlpUsedFpu(PVMCPU pVCpu) 219 { 220 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); 221 221 } 222 222 … … 232 232 IEM_CIMPL_DEF_0(iemCImpl_popa_16) 233 233 { 234 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);235 RTGCPTR GCPtrStart = iemRegGetEffRsp(p IemCpu, pCtx);234 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 235 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx); 236 236 RTGCPTR GCPtrLast = GCPtrStart + 15; 237 237 VBOXSTRICTRC rcStrict; … … 245 245 */ 246 246 /** @todo do popa boundary / wrap-around checks. 
*/ 247 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(p IemCpu)247 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu) 248 248 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */ 249 249 { … … 251 251 RTUINT64U TmpRsp; 252 252 TmpRsp.u = pCtx->rsp; 253 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->di, &TmpRsp);253 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->di, &TmpRsp); 254 254 if (rcStrict == VINF_SUCCESS) 255 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->si, &TmpRsp);255 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->si, &TmpRsp); 256 256 if (rcStrict == VINF_SUCCESS) 257 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->bp, &TmpRsp);257 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bp, &TmpRsp); 258 258 if (rcStrict == VINF_SUCCESS) 259 259 { 260 iemRegAddToRspEx(p IemCpu, pCtx, &TmpRsp, 2); /* sp */261 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->bx, &TmpRsp);260 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */ 261 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->bx, &TmpRsp); 262 262 } 263 263 if (rcStrict == VINF_SUCCESS) 264 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->dx, &TmpRsp);264 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->dx, &TmpRsp); 265 265 if (rcStrict == VINF_SUCCESS) 266 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->cx, &TmpRsp);266 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->cx, &TmpRsp); 267 267 if (rcStrict == VINF_SUCCESS) 268 rcStrict = iemMemStackPopU16Ex(p IemCpu, &pCtx->ax, &TmpRsp);268 rcStrict = iemMemStackPopU16Ex(pVCpu, &pCtx->ax, &TmpRsp); 269 269 if (rcStrict == VINF_SUCCESS) 270 270 { 271 271 pCtx->rsp = TmpRsp.u; 272 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);272 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 273 273 } 274 274 } … … 276 276 { 277 277 uint16_t const *pa16Mem = NULL; 278 rcStrict = iemMemMap(p IemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);278 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R); 279 279 if (rcStrict == VINF_SUCCESS) 280 280 { … … 287 287 pCtx->cx = pa16Mem[7 - X86_GREG_xCX]; 288 288 pCtx->ax = pa16Mem[7 - X86_GREG_xAX]; 289 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);289 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R); 290 290 if (rcStrict == VINF_SUCCESS) 291 291 { 292 iemRegAddToRsp(p IemCpu, pCtx, 16);293 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);292 iemRegAddToRsp(pVCpu, pCtx, 16); 293 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 294 294 } 295 295 } … … 304 304 IEM_CIMPL_DEF_0(iemCImpl_popa_32) 305 305 { 306 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);307 RTGCPTR GCPtrStart = iemRegGetEffRsp(p IemCpu, pCtx);306 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 307 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx); 308 308 RTGCPTR GCPtrLast = GCPtrStart + 31; 309 309 VBOXSTRICTRC rcStrict; … … 317 317 */ 318 318 /** @todo do popa boundary / wrap-around checks. 
*/ 319 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(p IemCpu)319 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu) 320 320 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */ 321 321 { … … 323 323 RTUINT64U TmpRsp; 324 324 TmpRsp.u = pCtx->rsp; 325 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->edi, &TmpRsp);325 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edi, &TmpRsp); 326 326 if (rcStrict == VINF_SUCCESS) 327 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->esi, &TmpRsp);327 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->esi, &TmpRsp); 328 328 if (rcStrict == VINF_SUCCESS) 329 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->ebp, &TmpRsp);329 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebp, &TmpRsp); 330 330 if (rcStrict == VINF_SUCCESS) 331 331 { 332 iemRegAddToRspEx(p IemCpu, pCtx, &TmpRsp, 2); /* sp */333 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->ebx, &TmpRsp);332 iemRegAddToRspEx(pVCpu, pCtx, &TmpRsp, 2); /* sp */ 333 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ebx, &TmpRsp); 334 334 } 335 335 if (rcStrict == VINF_SUCCESS) 336 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->edx, &TmpRsp);336 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->edx, &TmpRsp); 337 337 if (rcStrict == VINF_SUCCESS) 338 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->ecx, &TmpRsp);338 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->ecx, &TmpRsp); 339 339 if (rcStrict == VINF_SUCCESS) 340 rcStrict = iemMemStackPopU32Ex(p IemCpu, &pCtx->eax, &TmpRsp);340 rcStrict = iemMemStackPopU32Ex(pVCpu, &pCtx->eax, &TmpRsp); 341 341 if (rcStrict == VINF_SUCCESS) 342 342 { … … 351 351 #endif 352 352 pCtx->rsp = TmpRsp.u; 353 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);353 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 354 354 } 355 355 } … … 357 357 { 358 358 uint32_t const *pa32Mem; 359 rcStrict = iemMemMap(p IemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);359 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R); 360 360 if (rcStrict == VINF_SUCCESS) 361 361 { … … 368 368 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX]; 369 369 pCtx->rax = pa32Mem[7 - X86_GREG_xAX]; 370 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);370 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R); 371 371 if (rcStrict == VINF_SUCCESS) 372 372 { 373 iemRegAddToRsp(p IemCpu, pCtx, 32);374 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);373 iemRegAddToRsp(pVCpu, pCtx, 32); 374 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 375 375 } 376 376 } … … 385 385 IEM_CIMPL_DEF_0(iemCImpl_pusha_16) 386 386 { 387 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);388 RTGCPTR GCPtrTop = iemRegGetEffRsp(p IemCpu, pCtx);387 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 388 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx); 389 389 RTGCPTR GCPtrBottom = GCPtrTop - 15; 390 390 VBOXSTRICTRC rcStrict; … … 399 399 /** @todo do pusha boundary / wrap-around checks. 
*/ 400 400 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop 401 && IEM_IS_REAL_OR_V86_MODE(p IemCpu) ) )401 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) ) 402 402 { 403 403 /* word-by-word */ 404 404 RTUINT64U TmpRsp; 405 405 TmpRsp.u = pCtx->rsp; 406 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->ax, &TmpRsp);406 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp); 407 407 if (rcStrict == VINF_SUCCESS) 408 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->cx, &TmpRsp);408 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->cx, &TmpRsp); 409 409 if (rcStrict == VINF_SUCCESS) 410 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->dx, &TmpRsp);410 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->dx, &TmpRsp); 411 411 if (rcStrict == VINF_SUCCESS) 412 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->bx, &TmpRsp);412 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bx, &TmpRsp); 413 413 if (rcStrict == VINF_SUCCESS) 414 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->sp, &TmpRsp);414 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->sp, &TmpRsp); 415 415 if (rcStrict == VINF_SUCCESS) 416 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->bp, &TmpRsp);416 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->bp, &TmpRsp); 417 417 if (rcStrict == VINF_SUCCESS) 418 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->si, &TmpRsp);418 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->si, &TmpRsp); 419 419 if (rcStrict == VINF_SUCCESS) 420 rcStrict = iemMemStackPushU16Ex(p IemCpu, pCtx->di, &TmpRsp);420 rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->di, &TmpRsp); 421 421 if (rcStrict == VINF_SUCCESS) 422 422 { 423 423 pCtx->rsp = TmpRsp.u; 424 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);424 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 425 425 } 426 426 } … … 429 429 GCPtrBottom--; 430 430 uint16_t *pa16Mem = NULL; 431 rcStrict = iemMemMap(p IemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);431 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W); 432 432 if (rcStrict == VINF_SUCCESS) 433 433 { … … 440 440 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx; 441 441 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax; 442 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);442 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W); 443 443 if (rcStrict == VINF_SUCCESS) 444 444 { 445 iemRegSubFromRsp(p IemCpu, pCtx, 16);446 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);445 iemRegSubFromRsp(pVCpu, pCtx, 16); 446 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 447 447 } 448 448 } … … 457 457 IEM_CIMPL_DEF_0(iemCImpl_pusha_32) 458 458 { 459 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);460 RTGCPTR GCPtrTop = iemRegGetEffRsp(p IemCpu, pCtx);459 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 460 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu, pCtx); 461 461 RTGCPTR GCPtrBottom = GCPtrTop - 31; 462 462 VBOXSTRICTRC rcStrict; … … 471 471 /** @todo do pusha boundary / wrap-around checks. 
*/ 472 472 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop 473 && IEM_IS_REAL_OR_V86_MODE(p IemCpu) ) )473 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) ) 474 474 { 475 475 /* word-by-word */ 476 476 RTUINT64U TmpRsp; 477 477 TmpRsp.u = pCtx->rsp; 478 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->eax, &TmpRsp);478 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp); 479 479 if (rcStrict == VINF_SUCCESS) 480 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->ecx, &TmpRsp);480 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ecx, &TmpRsp); 481 481 if (rcStrict == VINF_SUCCESS) 482 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->edx, &TmpRsp);482 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edx, &TmpRsp); 483 483 if (rcStrict == VINF_SUCCESS) 484 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->ebx, &TmpRsp);484 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebx, &TmpRsp); 485 485 if (rcStrict == VINF_SUCCESS) 486 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->esp, &TmpRsp);486 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esp, &TmpRsp); 487 487 if (rcStrict == VINF_SUCCESS) 488 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->ebp, &TmpRsp);488 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->ebp, &TmpRsp); 489 489 if (rcStrict == VINF_SUCCESS) 490 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->esi, &TmpRsp);490 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->esi, &TmpRsp); 491 491 if (rcStrict == VINF_SUCCESS) 492 rcStrict = iemMemStackPushU32Ex(p IemCpu, pCtx->edi, &TmpRsp);492 rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->edi, &TmpRsp); 493 493 if (rcStrict == VINF_SUCCESS) 494 494 { 495 495 pCtx->rsp = TmpRsp.u; 496 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);496 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 497 497 } 498 498 } … … 501 501 GCPtrBottom--; 502 502 uint32_t *pa32Mem; 503 rcStrict = iemMemMap(p IemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);503 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W); 504 504 if (rcStrict == VINF_SUCCESS) 505 505 { … … 512 512 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx; 513 513 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax; 514 rcStrict = iemMemCommitAndUnmap(p IemCpu, pa32Mem, IEM_ACCESS_STACK_W);514 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W); 515 515 if (rcStrict == VINF_SUCCESS) 516 516 { 517 iemRegSubFromRsp(p IemCpu, pCtx, 32);518 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);517 iemRegSubFromRsp(pVCpu, pCtx, 32); 518 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 519 519 } 520 520 } … … 532 532 IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize) 533 533 { 534 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);534 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 535 535 536 536 /* … … 538 538 * doing this in a C implementation). 
539 539 */ 540 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);540 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 541 541 if ( (fEfl & X86_EFL_VM) 542 542 && X86_EFL_GET_IOPL(fEfl) != 3 ) … … 545 545 if ( enmEffOpSize != IEMMODE_16BIT 546 546 || !(pCtx->cr4 & X86_CR4_VME)) 547 return iemRaiseGeneralProtectionFault0(p IemCpu);547 return iemRaiseGeneralProtectionFault0(pVCpu); 548 548 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */ 549 549 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9); 550 return iemMemStackPushU16(p IemCpu, (uint16_t)fEfl);550 return iemMemStackPushU16(pVCpu, (uint16_t)fEfl); 551 551 } 552 552 … … 561 561 case IEMMODE_16BIT: 562 562 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186); 563 if (IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_186)563 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186) 564 564 fEfl |= UINT16_C(0xf000); 565 rcStrict = iemMemStackPushU16(p IemCpu, (uint16_t)fEfl);565 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl); 566 566 break; 567 567 case IEMMODE_32BIT: 568 rcStrict = iemMemStackPushU32(p IemCpu, fEfl);568 rcStrict = iemMemStackPushU32(pVCpu, fEfl); 569 569 break; 570 570 case IEMMODE_64BIT: 571 rcStrict = iemMemStackPushU64(p IemCpu, fEfl);571 rcStrict = iemMemStackPushU64(pVCpu, fEfl); 572 572 break; 573 573 IEM_NOT_REACHED_DEFAULT_CASE_RET(); … … 576 576 return rcStrict; 577 577 578 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);578 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 579 579 return VINF_SUCCESS; 580 580 } … … 588 588 IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize) 589 589 { 590 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);591 uint32_t const fEflOld = IEMMISC_GET_EFL(p IemCpu, pCtx);590 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 591 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx); 592 592 VBOXSTRICTRC rcStrict; 593 593 uint32_t fEflNew; … … 608 608 { 609 609 uint16_t u16Value; 610 rcStrict = iemMemStackPopU16(p IemCpu, &u16Value);610 rcStrict = iemMemStackPopU16(pVCpu, &u16Value); 611 611 if (rcStrict != VINF_SUCCESS) 612 612 return rcStrict; … … 615 615 } 616 616 case IEMMODE_32BIT: 617 rcStrict = iemMemStackPopU32(p IemCpu, &fEflNew);617 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew); 618 618 if (rcStrict != VINF_SUCCESS) 619 619 return rcStrict; … … 622 622 } 623 623 624 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386624 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386 625 625 ? 
X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386; 626 626 fEflNew &= fPopfBits & ~(X86_EFL_IOPL); … … 636 636 RTUINT64U TmpRsp; 637 637 TmpRsp.u = pCtx->rsp; 638 rcStrict = iemMemStackPopU16Ex(p IemCpu, &u16Value, &TmpRsp);638 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp); 639 639 if (rcStrict != VINF_SUCCESS) 640 640 return rcStrict; … … 645 645 && (fEflOld & X86_EFL_VIP)) 646 646 || (u16Value & X86_EFL_TF) ) 647 return iemRaiseGeneralProtectionFault0(p IemCpu);647 return iemRaiseGeneralProtectionFault0(pVCpu); 648 648 649 649 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF); … … 655 655 } 656 656 else 657 return iemRaiseGeneralProtectionFault0(p IemCpu);657 return iemRaiseGeneralProtectionFault0(pVCpu); 658 658 659 659 } … … 669 669 { 670 670 uint16_t u16Value; 671 rcStrict = iemMemStackPopU16(p IemCpu, &u16Value);671 rcStrict = iemMemStackPopU16(pVCpu, &u16Value); 672 672 if (rcStrict != VINF_SUCCESS) 673 673 return rcStrict; … … 685 685 * therefore be used to detect 286 or 386 CPU in real mode. 686 686 */ 687 if ( IEM_GET_TARGET_CPU(p IemCpu) == IEMTARGETCPU_286687 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286 688 688 && !(pCtx->cr0 & X86_CR0_PE) ) 689 689 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL); … … 691 691 } 692 692 case IEMMODE_32BIT: 693 rcStrict = iemMemStackPopU32(p IemCpu, &fEflNew);693 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew); 694 694 if (rcStrict != VINF_SUCCESS) 695 695 return rcStrict; … … 698 698 { 699 699 uint64_t u64Value; 700 rcStrict = iemMemStackPopU64(p IemCpu, &u64Value);700 rcStrict = iemMemStackPopU64(pVCpu, &u64Value); 701 701 if (rcStrict != VINF_SUCCESS) 702 702 return rcStrict; … … 708 708 709 709 /* Merge them with the current flags. */ 710 const uint32_t fPopfBits = IEMCPU_TO_VM(pIemCpu)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386710 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386 711 711 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386; 712 712 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF)) 713 || p IemCpu->uCpl == 0)713 || pVCpu->iem.s.uCpl == 0) 714 714 { 715 715 fEflNew &= fPopfBits; 716 716 fEflNew |= ~fPopfBits & fEflOld; 717 717 } 718 else if (p IemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))718 else if (pVCpu->iem.s.uCpl <= X86_EFL_GET_IOPL(fEflOld)) 719 719 { 720 720 fEflNew &= fPopfBits & ~(X86_EFL_IOPL); … … 732 732 */ 733 733 Assert(fEflNew & RT_BIT_32(1)); 734 IEMMISC_SET_EFL(p IemCpu, pCtx, fEflNew);735 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);734 IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew); 735 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 736 736 737 737 return VINF_SUCCESS; … … 748 748 IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC) 749 749 { 750 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);750 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 751 751 uint16_t uOldPC = pCtx->ip + cbInstr; 752 752 if (uNewPC > pCtx->cs.u32Limit) 753 return iemRaiseGeneralProtectionFault0(p IemCpu);754 755 VBOXSTRICTRC rcStrict = iemMemStackPushU16(p IemCpu, uOldPC);753 return iemRaiseGeneralProtectionFault0(pVCpu); 754 755 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC); 756 756 if (rcStrict != VINF_SUCCESS) 757 757 return rcStrict; … … 761 761 762 762 /* Flush the prefetch buffer. 
*/ 763 p IemCpu->cbOpcode = pIemCpu->offOpcode;763 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 764 764 return VINF_SUCCESS; 765 765 } … … 773 773 IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp) 774 774 { 775 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);775 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 776 776 uint16_t uOldPC = pCtx->ip + cbInstr; 777 777 uint16_t uNewPC = uOldPC + offDisp; 778 778 if (uNewPC > pCtx->cs.u32Limit) 779 return iemRaiseGeneralProtectionFault0(p IemCpu);780 781 VBOXSTRICTRC rcStrict = iemMemStackPushU16(p IemCpu, uOldPC);779 return iemRaiseGeneralProtectionFault0(pVCpu); 780 781 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC); 782 782 if (rcStrict != VINF_SUCCESS) 783 783 return rcStrict; … … 787 787 788 788 /* Flush the prefetch buffer. */ 789 p IemCpu->cbOpcode = pIemCpu->offOpcode;789 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 790 790 return VINF_SUCCESS; 791 791 } … … 801 801 IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC) 802 802 { 803 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);803 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 804 804 uint32_t uOldPC = pCtx->eip + cbInstr; 805 805 if (uNewPC > pCtx->cs.u32Limit) 806 return iemRaiseGeneralProtectionFault0(p IemCpu);807 808 VBOXSTRICTRC rcStrict = iemMemStackPushU32(p IemCpu, uOldPC);806 return iemRaiseGeneralProtectionFault0(pVCpu); 807 808 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC); 809 809 if (rcStrict != VINF_SUCCESS) 810 810 return rcStrict; … … 816 816 if ( !pCtx->eflags.Bits.u1IF 817 817 && (pCtx->cr0 & X86_CR0_PG) 818 && !CSAMIsEnabled( IEMCPU_TO_VM(pIemCpu))819 && p IemCpu->uCpl == 0)820 { 821 EMSTATE enmState = EMGetState( IEMCPU_TO_VMCPU(pIemCpu));818 && !CSAMIsEnabled(pVCpu->CTX_SUFF(pVM)) 819 && pVCpu->iem.s.uCpl == 0) 820 { 821 EMSTATE enmState = EMGetState(pVCpu); 822 822 if ( enmState == EMSTATE_IEM_THEN_REM 823 823 || enmState == EMSTATE_IEM 824 824 || enmState == EMSTATE_REM) 825 CSAMR3RecordCallAddress( IEMCPU_TO_VM(pIemCpu), pCtx->eip);825 CSAMR3RecordCallAddress(pVCpu->CTX_SUFF(pVM), pCtx->eip); 826 826 } 827 827 #endif … … 831 831 832 832 /* Flush the prefetch buffer. */ 833 p IemCpu->cbOpcode = pIemCpu->offOpcode;833 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 834 834 return VINF_SUCCESS; 835 835 } … … 843 843 IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp) 844 844 { 845 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);845 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 846 846 uint32_t uOldPC = pCtx->eip + cbInstr; 847 847 uint32_t uNewPC = uOldPC + offDisp; 848 848 if (uNewPC > pCtx->cs.u32Limit) 849 return iemRaiseGeneralProtectionFault0(p IemCpu);850 851 VBOXSTRICTRC rcStrict = iemMemStackPushU32(p IemCpu, uOldPC);849 return iemRaiseGeneralProtectionFault0(pVCpu); 850 851 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC); 852 852 if (rcStrict != VINF_SUCCESS) 853 853 return rcStrict; … … 857 857 858 858 /* Flush the prefetch buffer. 
*/ 859 p IemCpu->cbOpcode = pIemCpu->offOpcode;859 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 860 860 return VINF_SUCCESS; 861 861 } … … 871 871 IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC) 872 872 { 873 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);873 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 874 874 uint64_t uOldPC = pCtx->rip + cbInstr; 875 875 if (!IEM_IS_CANONICAL(uNewPC)) 876 return iemRaiseGeneralProtectionFault0(p IemCpu);877 878 VBOXSTRICTRC rcStrict = iemMemStackPushU64(p IemCpu, uOldPC);876 return iemRaiseGeneralProtectionFault0(pVCpu); 877 878 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC); 879 879 if (rcStrict != VINF_SUCCESS) 880 880 return rcStrict; … … 884 884 885 885 /* Flush the prefetch buffer. */ 886 p IemCpu->cbOpcode = pIemCpu->offOpcode;886 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 887 887 return VINF_SUCCESS; 888 888 } … … 896 896 IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp) 897 897 { 898 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);898 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 899 899 uint64_t uOldPC = pCtx->rip + cbInstr; 900 900 uint64_t uNewPC = uOldPC + offDisp; 901 901 if (!IEM_IS_CANONICAL(uNewPC)) 902 return iemRaiseNotCanonical(p IemCpu);903 904 VBOXSTRICTRC rcStrict = iemMemStackPushU64(p IemCpu, uOldPC);902 return iemRaiseNotCanonical(pVCpu); 903 904 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC); 905 905 if (rcStrict != VINF_SUCCESS) 906 906 return rcStrict; … … 910 910 911 911 /* Flush the prefetch buffer. */ 912 p IemCpu->cbOpcode = pIemCpu->offOpcode;912 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 913 913 914 914 return VINF_SUCCESS; … … 934 934 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL); 935 935 936 if ( pDesc->Legacy.Gate.u2Dpl < p IemCpu->uCpl936 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl 937 937 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL)) 938 938 { 939 939 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl, 940 p IemCpu->uCpl, (uSel & X86_SEL_RPL)));941 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);940 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL))); 941 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 942 942 } 943 943 … … 948 948 { 949 949 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel)); 950 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);951 } 952 953 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);950 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 951 } 952 953 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 954 954 uint32_t uNextEip = pCtx->eip + cbInstr; 955 return iemTaskSwitch(p IemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,955 return iemTaskSwitch(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL, 956 956 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc); 957 957 #endif … … 975 975 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL); 976 976 977 if ( pDesc->Legacy.Gate.u2Dpl < p IemCpu->uCpl977 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl 978 978 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL)) 979 979 { 980 980 Log(("BranchTaskGate invalid priv. 
uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl, 981 p IemCpu->uCpl, (uSel & X86_SEL_RPL)));982 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);981 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL))); 982 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 983 983 } 984 984 … … 989 989 { 990 990 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel)); 991 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);991 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 992 992 } 993 993 … … 999 999 { 1000 1000 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss)); 1001 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);1001 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 1002 1002 } 1003 1003 1004 1004 IEMSELDESC TssDesc; 1005 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &TssDesc, uSelTss, X86_XCPT_GP);1005 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP); 1006 1006 if (rcStrict != VINF_SUCCESS) 1007 1007 return rcStrict; … … 1011 1011 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss, 1012 1012 TssDesc.Legacy.Gate.u4Type)); 1013 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel & X86_SEL_MASK_OFF_RPL);1013 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL); 1014 1014 } 1015 1015 … … 1017 1017 { 1018 1018 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss)); 1019 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSelTss & X86_SEL_MASK_OFF_RPL);1020 } 1021 1022 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1019 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL); 1020 } 1021 1022 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1023 1023 uint32_t uNextEip = pCtx->eip + cbInstr; 1024 return iemTaskSwitch(p IemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,1024 return iemTaskSwitch(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL, 1025 1025 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc); 1026 1026 #endif … … 1074 1074 1075 1075 /* Perform DPL checks on the gate descriptor. */ 1076 if ( pDesc->Legacy.Gate.u2Dpl < p IemCpu->uCpl1076 if ( pDesc->Legacy.Gate.u2Dpl < pVCpu->iem.s.uCpl 1077 1077 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL)) 1078 1078 { 1079 1079 Log(("BranchCallGate invalid priv. 
uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl, 1080 p IemCpu->uCpl, (uSel & X86_SEL_RPL)));1081 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1080 pVCpu->iem.s.uCpl, (uSel & X86_SEL_RPL))); 1081 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1082 1082 } 1083 1083 … … 1086 1086 { 1087 1087 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel)); 1088 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel);1088 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel); 1089 1089 } 1090 1090 … … 1093 1093 */ 1094 1094 uNewCS = pDesc->Legacy.Gate.u16Sel; 1095 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, uNewCS, X86_XCPT_GP);1095 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP); 1096 1096 if (rcStrict != VINF_SUCCESS) 1097 1097 return rcStrict; … … 1103 1103 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n", 1104 1104 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); 1105 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS);1105 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS); 1106 1106 } 1107 1107 … … 1111 1111 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 1112 1112 { 1113 if (DescCS.Legacy.Gen.u2Dpl > p IemCpu->uCpl)1113 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl) 1114 1114 { 1115 1115 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n", 1116 uNewCS, DescCS.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1117 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS);1116 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1117 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS); 1118 1118 } 1119 1119 } 1120 1120 else 1121 1121 { 1122 if (DescCS.Legacy.Gen.u2Dpl != p IemCpu->uCpl)1122 if (DescCS.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl) 1123 1123 { 1124 1124 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n", 1125 uNewCS, DescCS.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1126 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS);1125 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1126 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS); 1127 1127 } 1128 1128 } … … 1131 1131 { 1132 1132 Assert(enmBranch == IEMBRANCH_CALL); 1133 if (DescCS.Legacy.Gen.u2Dpl > p IemCpu->uCpl)1133 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl) 1134 1134 { 1135 1135 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n", 1136 uNewCS, DescCS.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1137 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);1136 uNewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1137 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL); 1138 1138 } 1139 1139 } 1140 1140 1141 1141 /* Additional long mode checks. 
*/ 1142 if (IEM_IS_LONG_MODE(p IemCpu))1142 if (IEM_IS_LONG_MODE(pVCpu)) 1143 1143 { 1144 1144 if (!DescCS.Legacy.Gen.u1Long) 1145 1145 { 1146 1146 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS)); 1147 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS);1147 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS); 1148 1148 } 1149 1149 … … 1153 1153 { 1154 1154 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS)); 1155 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCS);1155 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS); 1156 1156 } 1157 1157 } … … 1160 1160 { 1161 1161 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS)); 1162 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewCS);1163 } 1164 1165 pCtx = p IemCpu->CTX_SUFF(pCtx);1162 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS); 1163 } 1164 1165 pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1166 1166 1167 1167 if (enmBranch == IEMBRANCH_JUMP) … … 1183 1183 { 1184 1184 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit)); 1185 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, 0);1185 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0); 1186 1186 } 1187 1187 u64Base = X86DESC_BASE(&DescCS.Legacy); … … 1192 1192 { 1193 1193 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip)); 1194 return iemRaiseNotCanonical(p IemCpu);1194 return iemRaiseNotCanonical(pVCpu); 1195 1195 } 1196 1196 … … 1201 1201 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1202 1202 { 1203 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCS);1203 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS); 1204 1204 if (rcStrict != VINF_SUCCESS) 1205 1205 return rcStrict; … … 1211 1211 pCtx->rip = uNewRip; 1212 1212 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL; 1213 pCtx->cs.Sel |= p IemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */1213 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */ 1214 1214 pCtx->cs.ValidSel = pCtx->cs.Sel; 1215 1215 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; … … 1217 1217 pCtx->cs.u32Limit = cbLimit; 1218 1218 pCtx->cs.u64Base = u64Base; 1219 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);1219 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 1220 1220 } 1221 1221 else … … 1224 1224 /* Calls are much more complicated. */ 1225 1225 1226 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < p IemCpu->uCpl))1226 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < pVCpu->iem.s.uCpl)) 1227 1227 { 1228 1228 uint16_t offNewStack; /* Offset of new stack in TSS. */ … … 1249 1249 /* Figure out where the new stack pointer is stored in the TSS. 
*/ 1250 1250 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl; 1251 if (!IEM_IS_LONG_MODE(p IemCpu))1251 if (!IEM_IS_LONG_MODE(pVCpu)) 1252 1252 { 1253 1253 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY) … … 1274 1274 { 1275 1275 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit)); 1276 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, pCtx->tr.Sel);1276 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel); 1277 1277 } 1278 1278 1279 1279 GCPtrTSS = pCtx->tr.u64Base + offNewStack; 1280 rcStrict = iemMemMap(p IemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);1280 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R); 1281 1281 if (rcStrict != VINF_SUCCESS) 1282 1282 { … … 1285 1285 } 1286 1286 1287 if (!IEM_IS_LONG_MODE(p IemCpu))1287 if (!IEM_IS_LONG_MODE(pVCpu)) 1288 1288 { 1289 1289 if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY) … … 1308 1308 1309 1309 /* Done with the TSS now. */ 1310 rcStrict = iemMemCommitAndUnmap(p IemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);1310 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R); 1311 1311 if (rcStrict != VINF_SUCCESS) 1312 1312 { … … 1319 1319 1320 1320 /* If EFER.LMA is 0, there's extra work to do. */ 1321 if (!IEM_IS_LONG_MODE(p IemCpu))1321 if (!IEM_IS_LONG_MODE(pVCpu)) 1322 1322 { 1323 1323 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0) 1324 1324 { 1325 1325 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n")); 1326 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uNewSS);1326 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS); 1327 1327 } 1328 1328 1329 1329 /* Grab the new SS descriptor. */ 1330 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSS, uNewSS, X86_XCPT_SS);1330 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS); 1331 1331 if (rcStrict != VINF_SUCCESS) 1332 1332 return rcStrict; … … 1338 1338 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n", 1339 1339 uNewSS, DescCS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl)); 1340 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uNewSS);1340 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS); 1341 1341 } 1342 1342 … … 1345 1345 { 1346 1346 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type)); 1347 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uNewSS);1347 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS); 1348 1348 } 1349 1349 … … 1351 1351 { 1352 1352 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS)); 1353 return iemRaiseStackSelectorNotPresentBySelector(p IemCpu, uNewSS);1353 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS); 1354 1354 } 1355 1355 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE) … … 1361 1361 { 1362 1362 /* Just grab the new (NULL) SS descriptor. 
*/ 1363 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSS, uNewSS, X86_XCPT_SS);1363 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS); 1364 1364 if (rcStrict != VINF_SUCCESS) 1365 1365 return rcStrict; … … 1382 1382 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1383 1383 { 1384 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewSS);1384 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS); 1385 1385 if (rcStrict != VINF_SUCCESS) 1386 1386 return rcStrict; … … 1402 1402 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID; 1403 1403 pCtx->rsp = uNewRsp; 1404 p IemCpu->uCpl = uNewCSDpl;1405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));1406 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);1404 pVCpu->iem.s.uCpl = uNewCSDpl; 1405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); 1406 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 1407 1407 1408 1408 /* Check new stack - may #SS(NewSS). */ 1409 rcStrict = iemMemStackPushBeginSpecial(p IemCpu, cbNewStack,1409 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack, 1410 1410 &uPtrRet.pv, &uNewRsp); 1411 1411 if (rcStrict != VINF_SUCCESS) … … 1415 1415 } 1416 1416 1417 if (!IEM_IS_LONG_MODE(p IemCpu))1417 if (!IEM_IS_LONG_MODE(pVCpu)) 1418 1418 { 1419 1419 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE) … … 1424 1424 1425 1425 /* Map the relevant chunk of the old stack. */ 1426 rcStrict = iemMemMap(p IemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);1426 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R); 1427 1427 if (rcStrict != VINF_SUCCESS) 1428 1428 { … … 1436 1436 1437 1437 /* Unmap the old stack. */ 1438 rcStrict = iemMemCommitAndUnmap(p IemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);1438 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R); 1439 1439 if (rcStrict != VINF_SUCCESS) 1440 1440 { … … 1456 1456 1457 1457 /* Map the relevant chunk of the old stack. */ 1458 rcStrict = iemMemMap(p IemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);1458 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R); 1459 1459 if (rcStrict != VINF_SUCCESS) 1460 1460 { … … 1468 1468 1469 1469 /* Unmap the old stack. */ 1470 rcStrict = iemMemCommitAndUnmap(p IemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);1470 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R); 1471 1471 if (rcStrict != VINF_SUCCESS) 1472 1472 { … … 1491 1491 } 1492 1492 1493 rcStrict = iemMemStackPushCommitSpecial(p IemCpu, uPtrRet.pv, uNewRsp);1493 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp); 1494 1494 if (rcStrict != VINF_SUCCESS) 1495 1495 { … … 1504 1504 /* Limit / canonical check. 
*/ 1505 1505 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy); 1506 if (!IEM_IS_LONG_MODE(p IemCpu))1506 if (!IEM_IS_LONG_MODE(pVCpu)) 1507 1507 { 1508 1508 if (uNewRip > cbLimit) 1509 1509 { 1510 1510 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit)); 1511 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, 0);1511 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0); 1512 1512 } 1513 1513 u64Base = X86DESC_BASE(&DescCS.Legacy); … … 1519 1519 { 1520 1520 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip)); 1521 return iemRaiseNotCanonical(p IemCpu);1521 return iemRaiseNotCanonical(pVCpu); 1522 1522 } 1523 1523 u64Base = 0; … … 1532 1532 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1533 1533 { 1534 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCS);1534 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS); 1535 1535 if (rcStrict != VINF_SUCCESS) 1536 1536 return rcStrict; … … 1542 1542 pCtx->rip = uNewRip; 1543 1543 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL; 1544 pCtx->cs.Sel |= p IemCpu->uCpl;1544 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; 1545 1545 pCtx->cs.ValidSel = pCtx->cs.Sel; 1546 1546 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; … … 1548 1548 pCtx->cs.u32Limit = cbLimit; 1549 1549 pCtx->cs.u64Base = u64Base; 1550 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);1550 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 1551 1551 } 1552 1552 else … … 1558 1558 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in 1559 1559 * 16-bit code cause a two or four byte CS to be pushed? */ 1560 rcStrict = iemMemStackPushBeginSpecial(p IemCpu,1561 IEM_IS_LONG_MODE(p IemCpu) ? 8+81560 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 1561 IEM_IS_LONG_MODE(pVCpu) ? 8+8 1562 1562 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2, 1563 1563 &uPtrRet.pv, &uNewRsp); … … 1571 1571 /* Limit / canonical check. 
*/ 1572 1572 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy); 1573 if (!IEM_IS_LONG_MODE(p IemCpu))1573 if (!IEM_IS_LONG_MODE(pVCpu)) 1574 1574 { 1575 1575 if (uNewRip > cbLimit) 1576 1576 { 1577 1577 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit)); 1578 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, 0);1578 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0); 1579 1579 } 1580 1580 u64Base = X86DESC_BASE(&DescCS.Legacy); … … 1585 1585 { 1586 1586 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip)); 1587 return iemRaiseNotCanonical(p IemCpu);1587 return iemRaiseNotCanonical(pVCpu); 1588 1588 } 1589 1589 u64Base = 0; … … 1598 1598 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1599 1599 { 1600 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCS);1600 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS); 1601 1601 if (rcStrict != VINF_SUCCESS) 1602 1602 return rcStrict; … … 1606 1606 1607 1607 /* stack */ 1608 if (!IEM_IS_LONG_MODE(p IemCpu))1608 if (!IEM_IS_LONG_MODE(pVCpu)) 1609 1609 { 1610 1610 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE) … … 1627 1627 } 1628 1628 1629 rcStrict = iemMemStackPushCommitSpecial(p IemCpu, uPtrRet.pv, uNewRsp);1629 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp); 1630 1630 if (rcStrict != VINF_SUCCESS) 1631 1631 return rcStrict; … … 1634 1634 pCtx->rip = uNewRip; 1635 1635 pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL; 1636 pCtx->cs.Sel |= p IemCpu->uCpl;1636 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; 1637 1637 pCtx->cs.ValidSel = pCtx->cs.Sel; 1638 1638 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; … … 1640 1640 pCtx->cs.u32Limit = cbLimit; 1641 1641 pCtx->cs.u64Base = u64Base; 1642 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);1642 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 1643 1643 } 1644 1644 } … … 1646 1646 1647 1647 /* Flush the prefetch buffer. 
*/ 1648 p IemCpu->cbOpcode = pIemCpu->offOpcode;1648 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 1649 1649 return VINF_SUCCESS; 1650 1650 #endif … … 1665 1665 Assert((uSel & X86_SEL_MASK_OFF_RPL)); 1666 1666 1667 if (IEM_IS_LONG_MODE(p IemCpu))1667 if (IEM_IS_LONG_MODE(pVCpu)) 1668 1668 switch (pDesc->Legacy.Gen.u4Type) 1669 1669 { … … 1678 1678 case AMD64_SEL_TYPE_SYS_INT_GATE: 1679 1679 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type)); 1680 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1680 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1681 1681 } 1682 1682 … … 1696 1696 case X86_SEL_TYPE_SYS_286_TSS_BUSY: 1697 1697 Log(("branch %04x -> busy 286 TSS\n", uSel)); 1698 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1698 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1699 1699 1700 1700 case X86_SEL_TYPE_SYS_386_TSS_BUSY: 1701 1701 Log(("branch %04x -> busy 386 TSS\n", uSel)); 1702 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1702 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1703 1703 1704 1704 default: … … 1709 1709 case X86_SEL_TYPE_SYS_386_TRAP_GATE: 1710 1710 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type)); 1711 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1711 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1712 1712 } 1713 1713 } … … 1723 1723 IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize) 1724 1724 { 1725 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1725 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1726 1726 NOREF(cbInstr); 1727 1727 Assert(offSeg <= UINT32_MAX); … … 1732 1732 * limit. 1733 1733 */ 1734 if ( p IemCpu->enmCpuMode == IEMMODE_16BIT1735 && IEM_IS_REAL_OR_V86_MODE(p IemCpu))1734 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT 1735 && IEM_IS_REAL_OR_V86_MODE(pVCpu)) 1736 1736 { 1737 1737 if (offSeg > pCtx->cs.u32Limit) 1738 1738 { 1739 1739 Log(("iemCImpl_FarJmp: 16-bit limit\n")); 1740 return iemRaiseGeneralProtectionFault0(p IemCpu);1740 return iemRaiseGeneralProtectionFault0(pVCpu); 1741 1741 } 1742 1742 … … 1759 1759 { 1760 1760 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg)); 1761 return iemRaiseGeneralProtectionFault0(p IemCpu);1761 return iemRaiseGeneralProtectionFault0(pVCpu); 1762 1762 } 1763 1763 1764 1764 /* Fetch the descriptor. 
*/ 1765 1765 IEMSELDESC Desc; 1766 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uSel, X86_XCPT_GP);1766 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); 1767 1767 if (rcStrict != VINF_SUCCESS) 1768 1768 return rcStrict; … … 1772 1772 { 1773 1773 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg)); 1774 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel);1774 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel); 1775 1775 } 1776 1776 … … 1786 1786 { 1787 1787 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type)); 1788 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1788 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1789 1789 } 1790 1790 … … 1792 1792 if ( Desc.Legacy.Gen.u1Long 1793 1793 && Desc.Legacy.Gen.u1DefBig 1794 && IEM_IS_LONG_MODE(p IemCpu))1794 && IEM_IS_LONG_MODE(pVCpu)) 1795 1795 { 1796 1796 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg)); 1797 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1797 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1798 1798 } 1799 1799 … … 1801 1801 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 1802 1802 { 1803 if (p IemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)1803 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl) 1804 1804 { 1805 1805 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n", 1806 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1807 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1806 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1807 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1808 1808 } 1809 1809 } 1810 1810 else 1811 1811 { 1812 if (p IemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)1813 { 1814 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1815 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1816 } 1817 if ((uSel & X86_SEL_RPL) > p IemCpu->uCpl)1818 { 1819 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), p IemCpu->uCpl));1820 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1812 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl) 1813 { 1814 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1815 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1816 } 1817 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl) 1818 { 1819 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl)); 1820 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1821 1821 } 1822 1822 } … … 1838 1838 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit)); 1839 1839 /** @todo: Intel says this is #GP(0)! */ 1840 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1840 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1841 1841 } 1842 1842 u64Base = X86DESC_BASE(&Desc.Legacy); … … 1849 1849 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 1850 1850 { 1851 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uSel);1851 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel); 1852 1852 if (rcStrict != VINF_SUCCESS) 1853 1853 return rcStrict; … … 1859 1859 pCtx->rip = offSeg; 1860 1860 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL; 1861 pCtx->cs.Sel |= p IemCpu->uCpl; /** @todo is this right for conforming segs? 
or in general? */1861 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; /** @todo is this right for conforming segs? or in general? */ 1862 1862 pCtx->cs.ValidSel = pCtx->cs.Sel; 1863 1863 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; … … 1865 1865 pCtx->cs.u32Limit = cbLimit; 1866 1866 pCtx->cs.u64Base = u64Base; 1867 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);1867 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 1868 1868 pCtx->eflags.Bits.u1RF = 0; 1869 1869 /** @todo check if the hidden bits are loaded correctly for 64-bit … … 1871 1871 1872 1872 /* Flush the prefetch buffer. */ 1873 p IemCpu->cbOpcode = pIemCpu->offOpcode;1873 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 1874 1874 1875 1875 return VINF_SUCCESS; … … 1888 1888 IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize) 1889 1889 { 1890 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1890 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1891 1891 VBOXSTRICTRC rcStrict; 1892 1892 uint64_t uNewRsp; … … 1898 1898 * limit. 1899 1899 */ 1900 if ( p IemCpu->enmCpuMode == IEMMODE_16BIT1901 && IEM_IS_REAL_OR_V86_MODE(p IemCpu))1900 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT 1901 && IEM_IS_REAL_OR_V86_MODE(pVCpu)) 1902 1902 { 1903 1903 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT); 1904 1904 1905 1905 /* Check stack first - may #SS(0). */ 1906 rcStrict = iemMemStackPushBeginSpecial(p IemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,1906 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4, 1907 1907 &uPtrRet.pv, &uNewRsp); 1908 1908 if (rcStrict != VINF_SUCCESS) … … 1911 1911 /* Check the target address range. */ 1912 1912 if (offSeg > UINT32_MAX) 1913 return iemRaiseGeneralProtectionFault0(p IemCpu);1913 return iemRaiseGeneralProtectionFault0(pVCpu); 1914 1914 1915 1915 /* Everything is fine, push the return address. */ … … 1924 1924 uPtrRet.pu16[3] = pCtx->cs.Sel; 1925 1925 } 1926 rcStrict = iemMemStackPushCommitSpecial(p IemCpu, uPtrRet.pv, uNewRsp);1926 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp); 1927 1927 if (rcStrict != VINF_SUCCESS) 1928 1928 return rcStrict; … … 1944 1944 { 1945 1945 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg)); 1946 return iemRaiseGeneralProtectionFault0(p IemCpu);1946 return iemRaiseGeneralProtectionFault0(pVCpu); 1947 1947 } 1948 1948 1949 1949 /* Fetch the descriptor. 
*/ 1950 1950 IEMSELDESC Desc; 1951 rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uSel, X86_XCPT_GP);1951 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); 1952 1952 if (rcStrict != VINF_SUCCESS) 1953 1953 return rcStrict; … … 1964 1964 { 1965 1965 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type)); 1966 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1966 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1967 1967 } 1968 1968 … … 1970 1970 if ( Desc.Legacy.Gen.u1Long 1971 1971 && Desc.Legacy.Gen.u1DefBig 1972 && IEM_IS_LONG_MODE(p IemCpu))1972 && IEM_IS_LONG_MODE(pVCpu)) 1973 1973 { 1974 1974 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg)); 1975 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1975 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1976 1976 } 1977 1977 … … 1979 1979 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) 1980 1980 { 1981 if (p IemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)1981 if (pVCpu->iem.s.uCpl < Desc.Legacy.Gen.u2Dpl) 1982 1982 { 1983 1983 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n", 1984 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1985 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1984 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1985 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1986 1986 } 1987 1987 } 1988 1988 else 1989 1989 { 1990 if (p IemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)1991 { 1992 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));1993 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1994 } 1995 if ((uSel & X86_SEL_RPL) > p IemCpu->uCpl)1996 { 1997 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), p IemCpu->uCpl));1998 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);1990 if (pVCpu->iem.s.uCpl != Desc.Legacy.Gen.u2Dpl) 1991 { 1992 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 1993 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1994 } 1995 if ((uSel & X86_SEL_RPL) > pVCpu->iem.s.uCpl) 1996 { 1997 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl)); 1998 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1999 1999 } 2000 2000 } … … 2004 2004 { 2005 2005 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg)); 2006 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel);2006 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel); 2007 2007 } 2008 2008 … … 2010 2010 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in 2011 2011 * 16-bit code cause a two or four byte CS to be pushed? */ 2012 rcStrict = iemMemStackPushBeginSpecial(p IemCpu,2012 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 2013 2013 enmEffOpSize == IEMMODE_64BIT ? 8+8 2014 2014 : enmEffOpSize == IEMMODE_32BIT ? 
4+4 : 2+2, … … 2024 2024 uint64_t u64Base; 2025 2025 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy); 2026 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)2026 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 2027 2027 { 2028 2028 if (!IEM_IS_CANONICAL(offSeg)) 2029 2029 { 2030 2030 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg)); 2031 return iemRaiseNotCanonical(p IemCpu);2031 return iemRaiseNotCanonical(pVCpu); 2032 2032 } 2033 2033 u64Base = 0; … … 2039 2039 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit)); 2040 2040 /** @todo: Intel says this is #GP(0)! */ 2041 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);2041 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 2042 2042 } 2043 2043 u64Base = X86DESC_BASE(&Desc.Legacy); … … 2052 2052 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2053 2053 { 2054 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uSel);2054 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel); 2055 2055 if (rcStrict != VINF_SUCCESS) 2056 2056 return rcStrict; … … 2075 2075 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */ 2076 2076 } 2077 rcStrict = iemMemStackPushCommitSpecial(p IemCpu, uPtrRet.pv, uNewRsp);2077 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp); 2078 2078 if (rcStrict != VINF_SUCCESS) 2079 2079 return rcStrict; … … 2082 2082 pCtx->rip = offSeg; 2083 2083 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL; 2084 pCtx->cs.Sel |= p IemCpu->uCpl;2084 pCtx->cs.Sel |= pVCpu->iem.s.uCpl; 2085 2085 pCtx->cs.ValidSel = pCtx->cs.Sel; 2086 2086 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID; … … 2088 2088 pCtx->cs.u32Limit = cbLimit; 2089 2089 pCtx->cs.u64Base = u64Base; 2090 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);2090 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 2091 2091 pCtx->eflags.Bits.u1RF = 0; 2092 2092 /** @todo check if the hidden bits are loaded correctly for 64-bit … … 2094 2094 2095 2095 /* Flush the prefetch buffer. */ 2096 p IemCpu->cbOpcode = pIemCpu->offOpcode;2096 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2097 2097 2098 2098 return VINF_SUCCESS; … … 2109 2109 IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) 2110 2110 { 2111 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2111 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2112 2112 VBOXSTRICTRC rcStrict; 2113 2113 RTCPTRUNION uPtrFrame; … … 2122 2122 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2 2123 2123 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8; 2124 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);2124 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp); 2125 2125 if (rcStrict != VINF_SUCCESS) 2126 2126 return rcStrict; … … 2144 2144 * Real mode and V8086 mode are easy. 2145 2145 */ 2146 if ( p IemCpu->enmCpuMode == IEMMODE_16BIT2147 && IEM_IS_REAL_OR_V86_MODE(p IemCpu))2146 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT 2147 && IEM_IS_REAL_OR_V86_MODE(pVCpu)) 2148 2148 { 2149 2149 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT); … … 2154 2154 * operands, AMD does not make any distinction. What is right? */ 2155 2155 if (uNewRip > pCtx->cs.u32Limit) 2156 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);2156 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 2157 2157 2158 2158 /* commit the operation. 
*/ 2159 rcStrict = iemMemStackPopCommitSpecial(p IemCpu, uPtrFrame.pv, uNewRsp);2159 rcStrict = iemMemStackPopCommitSpecial(pVCpu, uPtrFrame.pv, uNewRsp); 2160 2160 if (rcStrict != VINF_SUCCESS) 2161 2161 return rcStrict; … … 2168 2168 /** @todo do we load attribs and limit as well? */ 2169 2169 if (cbPop) 2170 iemRegAddToRsp(p IemCpu, pCtx, cbPop);2170 iemRegAddToRsp(pVCpu, pCtx, cbPop); 2171 2171 return VINF_SUCCESS; 2172 2172 } … … 2178 2178 { 2179 2179 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip)); 2180 return iemRaiseGeneralProtectionFault0(p IemCpu);2180 return iemRaiseGeneralProtectionFault0(pVCpu); 2181 2181 } 2182 2182 2183 2183 /* Fetch the descriptor. */ 2184 2184 IEMSELDESC DescCs; 2185 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCs, uNewCs, X86_XCPT_GP);2185 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP); 2186 2186 if (rcStrict != VINF_SUCCESS) 2187 2187 return rcStrict; … … 2193 2193 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n", 2194 2194 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type)); 2195 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2195 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2196 2196 } 2197 2197 … … 2199 2199 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */ 2200 2200 && DescCs.Legacy.Gen.u1DefBig 2201 && IEM_IS_LONG_MODE(p IemCpu))2201 && IEM_IS_LONG_MODE(pVCpu)) 2202 2202 { 2203 2203 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip)); 2204 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2204 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2205 2205 } 2206 2206 2207 2207 /* DPL/RPL/CPL checks. */ 2208 if ((uNewCs & X86_SEL_RPL) < p IemCpu->uCpl)2209 { 2210 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, p IemCpu->uCpl));2211 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2208 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl) 2209 { 2210 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl)); 2211 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2212 2212 } 2213 2213 … … 2218 2218 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n", 2219 2219 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL))); 2220 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2220 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2221 2221 } 2222 2222 } … … 2227 2227 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n", 2228 2228 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL))); 2229 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2229 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2230 2230 } 2231 2231 } … … 2235 2235 { 2236 2236 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip)); 2237 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewCs);2237 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs); 2238 2238 } 2239 2239 … … 2241 2241 * Return to outer privilege? (We'll typically have entered via a call gate.) 2242 2242 */ 2243 if ((uNewCs & X86_SEL_RPL) != p IemCpu->uCpl)2243 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl) 2244 2244 { 2245 2245 /* Read the outer stack pointer stored *after* the parameters. 
*/ 2246 2246 RTCPTRUNION uPtrStack; 2247 rcStrict = iemMemStackPopContinueSpecial(p IemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);2247 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp); 2248 2248 if (rcStrict != VINF_SUCCESS) 2249 2249 return rcStrict; … … 2279 2279 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n", 2280 2280 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp)); 2281 return iemRaiseGeneralProtectionFault0(p IemCpu);2281 return iemRaiseGeneralProtectionFault0(pVCpu); 2282 2282 } 2283 2283 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */ … … 2287 2287 { 2288 2288 /* Fetch the descriptor for the new stack segment. */ 2289 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);2289 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP); 2290 2290 if (rcStrict != VINF_SUCCESS) 2291 2291 return rcStrict; … … 2296 2296 { 2297 2297 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp)); 2298 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewOuterSs);2298 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs); 2299 2299 } 2300 2300 … … 2306 2306 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n", 2307 2307 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type)); 2308 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewOuterSs);2308 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs); 2309 2309 } 2310 2310 … … 2312 2312 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */ 2313 2313 && DescSs.Legacy.Gen.u1DefBig 2314 && IEM_IS_LONG_MODE(p IemCpu))2314 && IEM_IS_LONG_MODE(pVCpu)) 2315 2315 { 2316 2316 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n", 2317 2317 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp)); 2318 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewOuterSs);2318 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs); 2319 2319 } 2320 2320 … … 2324 2324 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n", 2325 2325 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL)); 2326 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewOuterSs);2326 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs); 2327 2327 } 2328 2328 … … 2331 2331 { 2332 2332 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp)); 2333 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewCs);2333 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs); 2334 2334 } 2335 2335 … … 2343 2343 /** @todo Testcase: Is this correct? */ 2344 2344 if ( DescCs.Legacy.Gen.u1Long 2345 && IEM_IS_LONG_MODE(p IemCpu) )2345 && IEM_IS_LONG_MODE(pVCpu) ) 2346 2346 { 2347 2347 if (!IEM_IS_CANONICAL(uNewRip)) 2348 2348 { 2349 2349 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp)); 2350 return iemRaiseNotCanonical(p IemCpu);2350 return iemRaiseNotCanonical(pVCpu); 2351 2351 } 2352 2352 u64Base = 0; … … 2359 2359 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs)); 2360 2360 /** @todo: Intel says this is #GP(0)! 
*/ 2361 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2361 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2362 2362 } 2363 2363 u64Base = X86DESC_BASE(&DescCs.Legacy); … … 2372 2372 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2373 2373 { 2374 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCs);2374 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); 2375 2375 if (rcStrict != VINF_SUCCESS) 2376 2376 return rcStrict; … … 2381 2381 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2382 2382 { 2383 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewOuterSs);2383 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs); 2384 2384 if (rcStrict != VINF_SUCCESS) 2385 2385 return rcStrict; … … 2389 2389 2390 2390 /* commit */ 2391 rcStrict = iemMemStackPopCommitSpecial(p IemCpu, uPtrFrame.pv, uNewRsp);2391 rcStrict = iemMemStackPopCommitSpecial(pVCpu, uPtrFrame.pv, uNewRsp); 2392 2392 if (rcStrict != VINF_SUCCESS) 2393 2393 return rcStrict; … … 2402 2402 pCtx->cs.u32Limit = cbLimitCs; 2403 2403 pCtx->cs.u64Base = u64Base; 2404 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);2404 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 2405 2405 pCtx->rsp = uNewOuterRsp; 2406 2406 pCtx->ss.Sel = uNewOuterSs; … … 2409 2409 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy); 2410 2410 pCtx->ss.u32Limit = cbLimitSs; 2411 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)2411 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 2412 2412 pCtx->ss.u64Base = 0; 2413 2413 else 2414 2414 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy); 2415 2415 2416 p IemCpu->uCpl = (uNewCs & X86_SEL_RPL);2417 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);2418 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);2419 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);2420 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);2416 pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL); 2417 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds); 2418 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es); 2419 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs); 2420 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs); 2421 2421 2422 2422 /** @todo check if the hidden bits are loaded correctly for 64-bit … … 2424 2424 2425 2425 if (cbPop) 2426 iemRegAddToRsp(p IemCpu, pCtx, cbPop);2426 iemRegAddToRsp(pVCpu, pCtx, cbPop); 2427 2427 pCtx->eflags.Bits.u1RF = 0; 2428 2428 … … 2440 2440 /** @todo Testcase: Is this correct? */ 2441 2441 if ( DescCs.Legacy.Gen.u1Long 2442 && IEM_IS_LONG_MODE(p IemCpu) )2442 && IEM_IS_LONG_MODE(pVCpu) ) 2443 2443 { 2444 2444 if (!IEM_IS_CANONICAL(uNewRip)) 2445 2445 { 2446 2446 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip)); 2447 return iemRaiseNotCanonical(p IemCpu);2447 return iemRaiseNotCanonical(pVCpu); 2448 2448 } 2449 2449 u64Base = 0; … … 2455 2455 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs)); 2456 2456 /** @todo: Intel says this is #GP(0)! 
*/ 2457 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);2457 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2458 2458 } 2459 2459 u64Base = X86DESC_BASE(&DescCs.Legacy); … … 2468 2468 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 2469 2469 { 2470 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCs);2470 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); 2471 2471 if (rcStrict != VINF_SUCCESS) 2472 2472 return rcStrict; … … 2476 2476 2477 2477 /* commit */ 2478 rcStrict = iemMemStackPopCommitSpecial(p IemCpu, uPtrFrame.pv, uNewRsp);2478 rcStrict = iemMemStackPopCommitSpecial(pVCpu, uPtrFrame.pv, uNewRsp); 2479 2479 if (rcStrict != VINF_SUCCESS) 2480 2480 return rcStrict; … … 2491 2491 /** @todo check if the hidden bits are loaded correctly for 64-bit 2492 2492 * mode. */ 2493 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);2493 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 2494 2494 if (cbPop) 2495 iemRegAddToRsp(p IemCpu, pCtx, cbPop);2495 iemRegAddToRsp(pVCpu, pCtx, cbPop); 2496 2496 pCtx->eflags.Bits.u1RF = 0; 2497 2497 } 2498 2498 2499 2499 /* Flush the prefetch buffer. */ 2500 p IemCpu->cbOpcode = pIemCpu->offOpcode;2500 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2501 2501 return VINF_SUCCESS; 2502 2502 } … … 2515 2515 IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop) 2516 2516 { 2517 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2517 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2518 2518 NOREF(cbInstr); 2519 2519 … … 2527 2527 case IEMMODE_16BIT: 2528 2528 NewRip.u = 0; 2529 rcStrict = iemMemStackPopU16Ex(p IemCpu, &NewRip.Words.w0, &NewRsp);2529 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp); 2530 2530 break; 2531 2531 case IEMMODE_32BIT: 2532 2532 NewRip.u = 0; 2533 rcStrict = iemMemStackPopU32Ex(p IemCpu, &NewRip.DWords.dw0, &NewRsp);2533 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp); 2534 2534 break; 2535 2535 case IEMMODE_64BIT: 2536 rcStrict = iemMemStackPopU64Ex(p IemCpu, &NewRip.u, &NewRsp);2536 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp); 2537 2537 break; 2538 2538 IEM_NOT_REACHED_DEFAULT_CASE_RET(); … … 2549 2549 { 2550 2550 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit)); 2551 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);2551 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 2552 2552 } 2553 2553 } … … 2557 2557 { 2558 2558 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u)); 2559 return iemRaiseNotCanonical(p IemCpu);2559 return iemRaiseNotCanonical(pVCpu); 2560 2560 } 2561 2561 } … … 2563 2563 /* Apply cbPop */ 2564 2564 if (cbPop) 2565 iemRegAddToRspEx(p IemCpu, pCtx, &NewRsp, cbPop);2565 iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop); 2566 2566 2567 2567 /* Commit it. */ … … 2571 2571 2572 2572 /* Flush the prefetch buffer. */ 2573 p IemCpu->cbOpcode = pIemCpu->offOpcode;2573 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2574 2574 2575 2575 return VINF_SUCCESS; … … 2587 2587 IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters) 2588 2588 { 2589 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2589 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2590 2590 2591 2591 /* Push RBP, saving the old value in TmpRbp. 
*/ … … 2596 2596 if (enmEffOpSize == IEMMODE_64BIT) 2597 2597 { 2598 rcStrict = iemMemStackPushU64Ex(p IemCpu, TmpRbp.u, &NewRsp);2598 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp); 2599 2599 NewRbp = NewRsp; 2600 2600 } 2601 2601 else if (enmEffOpSize == IEMMODE_32BIT) 2602 2602 { 2603 rcStrict = iemMemStackPushU32Ex(p IemCpu, TmpRbp.DWords.dw0, &NewRsp);2603 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp); 2604 2604 NewRbp = NewRsp; 2605 2605 } 2606 2606 else 2607 2607 { 2608 rcStrict = iemMemStackPushU16Ex(p IemCpu, TmpRbp.Words.w0, &NewRsp);2608 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp); 2609 2609 NewRbp = TmpRbp; 2610 2610 NewRbp.Words.w0 = NewRsp.Words.w0; … … 2627 2627 { 2628 2628 uint16_t u16Tmp; 2629 rcStrict = iemMemStackPopU16Ex(p IemCpu, &u16Tmp, &TmpRbp);2629 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp); 2630 2630 if (rcStrict != VINF_SUCCESS) 2631 2631 break; 2632 rcStrict = iemMemStackPushU16Ex(p IemCpu, u16Tmp, &NewRsp);2632 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp); 2633 2633 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS); 2634 2634 break; … … 2642 2642 { 2643 2643 uint32_t u32Tmp; 2644 rcStrict = iemMemStackPopU32Ex(p IemCpu, &u32Tmp, &TmpRbp);2644 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp); 2645 2645 if (rcStrict != VINF_SUCCESS) 2646 2646 break; 2647 rcStrict = iemMemStackPushU32Ex(p IemCpu, u32Tmp, &NewRsp);2647 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp); 2648 2648 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS); 2649 2649 break; … … 2654 2654 { 2655 2655 uint64_t u64Tmp; 2656 rcStrict = iemMemStackPopU64Ex(p IemCpu, &u64Tmp, &TmpRbp);2656 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp); 2657 2657 if (rcStrict != VINF_SUCCESS) 2658 2658 break; 2659 rcStrict = iemMemStackPushU64Ex(p IemCpu, u64Tmp, &NewRsp);2659 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp); 2660 2660 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS); 2661 2661 break; … … 2668 2668 /* Push the new RBP */ 2669 2669 if (enmEffOpSize == IEMMODE_64BIT) 2670 rcStrict = iemMemStackPushU64Ex(p IemCpu, NewRbp.u, &NewRsp);2670 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp); 2671 2671 else if (enmEffOpSize == IEMMODE_32BIT) 2672 rcStrict = iemMemStackPushU32Ex(p IemCpu, NewRbp.DWords.dw0, &NewRsp);2672 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp); 2673 2673 else 2674 rcStrict = iemMemStackPushU16Ex(p IemCpu, NewRbp.Words.w0, &NewRsp);2674 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp); 2675 2675 if (rcStrict != VINF_SUCCESS) 2676 2676 return rcStrict; … … 2679 2679 2680 2680 /* Recalc RSP. */ 2681 iemRegSubFromRspEx(p IemCpu, pCtx, &NewRsp, cbFrame);2681 iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame); 2682 2682 2683 2683 /** @todo Should probe write access at the new RSP according to AMD. */ … … 2686 2686 pCtx->rbp = NewRbp.u; 2687 2687 pCtx->rsp = NewRsp.u; 2688 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);2688 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 2689 2689 2690 2690 return VINF_SUCCESS; … … 2703 2703 IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize) 2704 2704 { 2705 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2705 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2706 2706 2707 2707 /* Calculate the intermediate RSP from RBP and the stack attributes. 
*/ 2708 2708 RTUINT64U NewRsp; 2709 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)2709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 2710 2710 NewRsp.u = pCtx->rbp; 2711 2711 else if (pCtx->ss.Attr.n.u1DefBig) … … 2725 2725 case IEMMODE_16BIT: 2726 2726 NewRbp.u = pCtx->rbp; 2727 rcStrict = iemMemStackPopU16Ex(p IemCpu, &NewRbp.Words.w0, &NewRsp);2727 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp); 2728 2728 break; 2729 2729 case IEMMODE_32BIT: 2730 2730 NewRbp.u = 0; 2731 rcStrict = iemMemStackPopU32Ex(p IemCpu, &NewRbp.DWords.dw0, &NewRsp);2731 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp); 2732 2732 break; 2733 2733 case IEMMODE_64BIT: 2734 rcStrict = iemMemStackPopU64Ex(p IemCpu, &NewRbp.u, &NewRsp);2734 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp); 2735 2735 break; 2736 2736 IEM_NOT_REACHED_DEFAULT_CASE_RET(); … … 2743 2743 pCtx->rbp = NewRbp.u; 2744 2744 pCtx->rsp = NewRsp.u; 2745 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);2745 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 2746 2746 2747 2747 return VINF_SUCCESS; … … 2757 2757 IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr) 2758 2758 { 2759 Assert(p IemCpu->cXcptRecursions == 0);2760 return iemRaiseXcptOrInt(p IemCpu,2759 Assert(pVCpu->iem.s.cXcptRecursions == 0); 2760 return iemRaiseXcptOrInt(pVCpu, 2761 2761 cbInstr, 2762 2762 u8Int, … … 2774 2774 IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize) 2775 2775 { 2776 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2776 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2777 2777 X86EFLAGS Efl; 2778 Efl.u = IEMMISC_GET_EFL(p IemCpu, pCtx);2778 Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx); 2779 2779 NOREF(cbInstr); 2780 2780 … … 2785 2785 && Efl.Bits.u2IOPL != 3 2786 2786 && !(pCtx->cr4 & X86_CR4_VME)) 2787 return iemRaiseGeneralProtectionFault0(p IemCpu);2787 return iemRaiseGeneralProtectionFault0(pVCpu); 2788 2788 2789 2789 /* … … 2800 2800 if (enmEffOpSize == IEMMODE_32BIT) 2801 2801 { 2802 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 12, &uFrame.pv, &uNewRsp);2802 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp); 2803 2803 if (rcStrict != VINF_SUCCESS) 2804 2804 return rcStrict; 2805 2805 uNewEip = uFrame.pu32[0]; 2806 2806 if (uNewEip > UINT16_MAX) 2807 return iemRaiseGeneralProtectionFault0(p IemCpu);2807 return iemRaiseGeneralProtectionFault0(pVCpu); 2808 2808 2809 2809 uNewCs = (uint16_t)uFrame.pu32[1]; … … 2813 2813 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/ 2814 2814 | X86_EFL_ID; 2815 if (IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_386)2815 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386) 2816 2816 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP); 2817 2817 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1); … … 2819 2819 else 2820 2820 { 2821 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 6, &uFrame.pv, &uNewRsp);2821 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp); 2822 2822 if (rcStrict != VINF_SUCCESS) 2823 2823 return rcStrict; … … 2831 2831 * reserved flags. We just ignore them. */ 2832 2832 /* Ancient CPU adjustments: See iemCImpl_popf. */ 2833 if (IEM_GET_TARGET_CPU(p IemCpu) == IEMTARGETCPU_286)2833 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286) 2834 2834 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL); 2835 2835 } … … 2844 2844 * right? 
*/ 2845 2845 if (uNewEip > pCtx->cs.u32Limit) 2846 return iemRaiseSelectorBounds(p IemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);2846 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION); 2847 2847 2848 2848 /* … … 2869 2869 } 2870 2870 else 2871 return iemRaiseGeneralProtectionFault0(p IemCpu);2871 return iemRaiseGeneralProtectionFault0(pVCpu); 2872 2872 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags)); 2873 2873 } … … 2876 2876 * Commit the operation. 2877 2877 */ 2878 rcStrict = iemMemStackPopCommitSpecial(p IemCpu, uFrame.pv, uNewRsp);2878 rcStrict = iemMemStackPopCommitSpecial(pVCpu, uFrame.pv, uNewRsp); 2879 2879 if (rcStrict != VINF_SUCCESS) 2880 2880 return rcStrict; 2881 2881 #ifdef DBGFTRACE_ENABLED 2882 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",2882 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx", 2883 2883 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp); 2884 2884 #endif … … 2891 2891 /** @todo do we load attribs and limit as well? */ 2892 2892 Assert(uNewFlags & X86_EFL_1); 2893 IEMMISC_SET_EFL(p IemCpu, pCtx, uNewFlags);2893 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags); 2894 2894 2895 2895 /* Flush the prefetch buffer. */ 2896 p IemCpu->cbOpcode = pIemCpu->offOpcode;2896 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2897 2897 2898 2898 return VINF_SUCCESS; … … 2938 2938 VBOXSTRICTRC rcStrict; 2939 2939 RTCPTRUNION uFrame; 2940 rcStrict = iemMemStackPopContinueSpecial(p IemCpu, 24, &uFrame.pv, &uNewRsp);2940 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 24, &uFrame.pv, &uNewRsp); 2941 2941 if (rcStrict != VINF_SUCCESS) 2942 2942 return rcStrict; … … 2947 2947 uint16_t uNewFs = uFrame.pu32[4]; 2948 2948 uint16_t uNewGs = uFrame.pu32[5]; 2949 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */2949 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */ 2950 2950 if (rcStrict != VINF_SUCCESS) 2951 2951 return rcStrict; … … 2957 2957 uNewFlags |= X86_EFL_RA1_MASK; 2958 2958 #ifdef DBGFTRACE_ENABLED 2959 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",2959 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x", 2960 2960 pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp); 2961 2961 #endif 2962 2962 2963 IEMMISC_SET_EFL(p IemCpu, pCtx, uNewFlags);2963 IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags); 2964 2964 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs); 2965 2965 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs); … … 2970 2970 pCtx->rip = (uint16_t)uNewEip; 2971 2971 pCtx->rsp = uNewEsp; /** @todo check this out! */ 2972 p IemCpu->uCpl = 3;2972 pVCpu->iem.s.uCpl = 3; 2973 2973 2974 2974 /* Flush the prefetch buffer. 
*/ 2975 p IemCpu->cbOpcode = pIemCpu->offOpcode;2975 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 2976 2976 2977 2977 return VINF_SUCCESS; … … 2994 2994 */ 2995 2995 RTSEL uSelRet; 2996 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);2997 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(p IemCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);2996 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 2997 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base); 2998 2998 if (rcStrict != VINF_SUCCESS) 2999 2999 return rcStrict; … … 3005 3005 { 3006 3006 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet)); 3007 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uSelRet);3007 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet); 3008 3008 } 3009 3009 3010 3010 IEMSELDESC TssDesc; 3011 rcStrict = iemMemFetchSelDesc(p IemCpu, &TssDesc, uSelRet, X86_XCPT_GP);3011 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP); 3012 3012 if (rcStrict != VINF_SUCCESS) 3013 3013 return rcStrict; … … 3016 3016 { 3017 3017 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet)); 3018 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);3018 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 3019 3019 } 3020 3020 … … 3023 3023 { 3024 3024 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type)); 3025 return iemRaiseTaskSwitchFaultBySelector(p IemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);3025 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 3026 3026 } 3027 3027 … … 3029 3029 { 3030 3030 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet)); 3031 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSelRet & X86_SEL_MASK_OFF_RPL);3031 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL); 3032 3032 } 3033 3033 3034 3034 uint32_t uNextEip = pCtx->eip + cbInstr; 3035 return iemTaskSwitch(p IemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,3035 return iemTaskSwitch(pVCpu, pVCpu->iem.s.CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */, 3036 3036 0 /* uCr2 */, uSelRet, &TssDesc); 3037 3037 #endif … … 3046 3046 IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize) 3047 3047 { 3048 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);3048 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 3049 3049 NOREF(cbInstr); 3050 3050 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT); … … 3071 3071 if (enmEffOpSize == IEMMODE_32BIT) 3072 3072 { 3073 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 12, &uFrame.pv, &uNewRsp);3073 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp); 3074 3074 if (rcStrict != VINF_SUCCESS) 3075 3075 return rcStrict; … … 3080 3080 else 3081 3081 { 3082 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 6, &uFrame.pv, &uNewRsp);3082 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp); 3083 3083 if (rcStrict != VINF_SUCCESS) 3084 3084 return rcStrict; … … 3087 3087 uNewFlags = uFrame.pu16[2]; 3088 3088 } 3089 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */3089 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. 
*/ 3090 3090 if (rcStrict != VINF_SUCCESS) 3091 3091 return rcStrict; … … 3096 3096 */ 3097 3097 if ( (uNewFlags & X86_EFL_VM) 3098 && p IemCpu->uCpl == 0)3098 && pVCpu->iem.s.uCpl == 0) 3099 3099 { 3100 3100 Assert(enmEffOpSize == IEMMODE_32BIT); … … 3109 3109 { 3110 3110 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip)); 3111 return iemRaiseGeneralProtectionFault0(p IemCpu);3111 return iemRaiseGeneralProtectionFault0(pVCpu); 3112 3112 } 3113 3113 3114 3114 IEMSELDESC DescCS; 3115 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, uNewCs, X86_XCPT_GP);3115 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP); 3116 3116 if (rcStrict != VINF_SUCCESS) 3117 3117 { … … 3124 3124 { 3125 3125 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type)); 3126 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3126 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3127 3127 } 3128 3128 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)) 3129 3129 { 3130 3130 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type)); 3131 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3131 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3132 3132 } 3133 3133 3134 3134 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 3135 3135 /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */ 3136 PVM pVM = IEMCPU_TO_VM(pIemCpu);3136 PVM pVM = pVCpu->CTX_SUFF(pVM); 3137 3137 if (EMIsRawRing0Enabled(pVM) && !HMIsEnabled(pVM)) 3138 3138 { 3139 3139 if ((uNewCs & X86_SEL_RPL) == 1) 3140 3140 { 3141 if ( p IemCpu->uCpl == 03141 if ( pVCpu->iem.s.uCpl == 0 3142 3142 && ( !EMIsRawRing1Enabled(pVM) 3143 3143 || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) ) … … 3147 3147 } 3148 3148 # ifdef LOG_ENABLED 3149 else if (p IemCpu->uCpl <= 1 && EMIsRawRing1Enabled(pVM))3149 else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM)) 3150 3150 Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs)); 3151 3151 # endif … … 3153 3153 else if ( (uNewCs & X86_SEL_RPL) == 2 3154 3154 && EMIsRawRing1Enabled(pVM) 3155 && p IemCpu->uCpl <= 1)3155 && pVCpu->iem.s.uCpl <= 1) 3156 3156 { 3157 3157 Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1)); … … 3168 3168 { 3169 3169 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl)); 3170 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3170 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3171 3171 } 3172 3172 } … … 3174 3174 { 3175 3175 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl)); 3176 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3177 } 3178 if ((uNewCs & X86_SEL_RPL) < p IemCpu->uCpl)3179 { 3180 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, p IemCpu->uCpl));3181 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3176 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3177 } 3178 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl) 3179 { 3180 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pVCpu->iem.s.uCpl)); 3181 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3182 3182 } 3183 3183 … … 3186 3186 { 3187 3187 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip)); 3188 return 
iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewCs);3188 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs); 3189 3189 } 3190 3190 … … 3194 3194 * Return to outer level? 3195 3195 */ 3196 if ((uNewCs & X86_SEL_RPL) != p IemCpu->uCpl)3196 if ((uNewCs & X86_SEL_RPL) != pVCpu->iem.s.uCpl) 3197 3197 { 3198 3198 uint16_t uNewSS; … … 3200 3200 if (enmEffOpSize == IEMMODE_32BIT) 3201 3201 { 3202 rcStrict = iemMemStackPopContinueSpecial(p IemCpu, 8, &uFrame.pv, &uNewRsp);3202 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &uFrame.pv, &uNewRsp); 3203 3203 if (rcStrict != VINF_SUCCESS) 3204 3204 return rcStrict; … … 3211 3211 else 3212 3212 { 3213 rcStrict = iemMemStackPopContinueSpecial(p IemCpu, 4, &uFrame.pv, &uNewRsp);3213 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 4, &uFrame.pv, &uNewRsp); 3214 3214 if (rcStrict != VINF_SUCCESS) 3215 3215 return rcStrict; … … 3217 3217 uNewSS = uFrame.pu16[1]; 3218 3218 } 3219 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);3219 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); 3220 3220 if (rcStrict != VINF_SUCCESS) 3221 3221 return rcStrict; … … 3226 3226 { 3227 3227 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP)); 3228 return iemRaiseGeneralProtectionFault0(p IemCpu);3228 return iemRaiseGeneralProtectionFault0(pVCpu); 3229 3229 } 3230 3230 3231 3231 IEMSELDESC DescSS; 3232 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */3232 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */ 3233 3233 if (rcStrict != VINF_SUCCESS) 3234 3234 { … … 3242 3242 { 3243 3243 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP)); 3244 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSS);3244 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS); 3245 3245 } 3246 3246 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL)) … … 3248 3248 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n", 3249 3249 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl)); 3250 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSS);3250 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS); 3251 3251 } 3252 3252 … … 3256 3256 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n", 3257 3257 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type)); 3258 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSS);3258 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS); 3259 3259 } 3260 3260 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) … … 3262 3262 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n", 3263 3263 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type)); 3264 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSS);3264 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS); 3265 3265 } 3266 3266 … … 3269 3269 { 3270 3270 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP)); 3271 return iemRaiseStackSelectorNotPresentBySelector(p IemCpu, uNewSS);3271 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS); 3272 3272 } 3273 3273 … … 3280 3280 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS)); 3281 3281 /** @todo: Which is it, #GP(0) or #GP(sel)? 
*/ 3282 return iemRaiseSelectorBoundsBySelector(p IemCpu, uNewCs);3282 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3283 3283 } 3284 3284 … … 3289 3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3290 3290 { 3291 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCs);3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); 3292 3292 if (rcStrict != VINF_SUCCESS) 3293 3293 return rcStrict; … … 3296 3296 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3297 3297 { 3298 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewSS);3298 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS); 3299 3299 if (rcStrict != VINF_SUCCESS) 3300 3300 return rcStrict; … … 3306 3306 if (enmEffOpSize != IEMMODE_16BIT) 3307 3307 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; 3308 if (p IemCpu->uCpl == 0)3308 if (pVCpu->iem.s.uCpl == 0) 3309 3309 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */ 3310 else if (p IemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)3310 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL) 3311 3311 fEFlagsMask |= X86_EFL_IF; 3312 if (IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_386)3312 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386) 3313 3313 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP); 3314 uint32_t fEFlagsNew = IEMMISC_GET_EFL(p IemCpu, pCtx);3314 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx); 3315 3315 fEFlagsNew &= ~fEFlagsMask; 3316 3316 fEFlagsNew |= uNewFlags & fEFlagsMask; 3317 3317 #ifdef DBGFTRACE_ENABLED 3318 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",3319 p IemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,3318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x", 3319 pVCpu->iem.s.uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip, 3320 3320 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP); 3321 3321 #endif 3322 3322 3323 IEMMISC_SET_EFL(p IemCpu, pCtx, fEFlagsNew);3323 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew); 3324 3324 pCtx->rip = uNewEip; 3325 3325 pCtx->cs.Sel = uNewCs; … … 3329 3329 pCtx->cs.u32Limit = cbLimitCS; 3330 3330 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 3331 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);3331 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 3332 3332 if (!pCtx->ss.Attr.n.u1DefBig) 3333 3333 pCtx->sp = (uint16_t)uNewESP; … … 3341 3341 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy); 3342 3342 3343 p IemCpu->uCpl = uNewCs & X86_SEL_RPL;3344 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);3345 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);3346 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);3347 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);3343 pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL; 3344 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds); 3345 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->es); 3346 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->fs); 3347 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->gs); 3348 3348 3349 3349 /* Done! */ … … 3360 3360 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS)); 3361 3361 /** @todo: Which is it, #GP(0) or #GP(sel)? 
*/ 3362 return iemRaiseSelectorBoundsBySelector(p IemCpu, uNewCs);3362 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3363 3363 } 3364 3364 … … 3368 3368 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3369 3369 { 3370 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCs);3370 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); 3371 3371 if (rcStrict != VINF_SUCCESS) 3372 3372 return rcStrict; … … 3375 3375 3376 3376 X86EFLAGS NewEfl; 3377 NewEfl.u = IEMMISC_GET_EFL(p IemCpu, pCtx);3377 NewEfl.u = IEMMISC_GET_EFL(pVCpu, pCtx); 3378 3378 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF 3379 3379 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; 3380 3380 if (enmEffOpSize != IEMMODE_16BIT) 3381 3381 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; 3382 if (p IemCpu->uCpl == 0)3382 if (pVCpu->iem.s.uCpl == 0) 3383 3383 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */ 3384 else if (p IemCpu->uCpl <= NewEfl.Bits.u2IOPL)3384 else if (pVCpu->iem.s.uCpl <= NewEfl.Bits.u2IOPL) 3385 3385 fEFlagsMask |= X86_EFL_IF; 3386 if (IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_386)3386 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386) 3387 3387 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP); 3388 3388 NewEfl.u &= ~fEFlagsMask; 3389 3389 NewEfl.u |= fEFlagsMask & uNewFlags; 3390 3390 #ifdef DBGFTRACE_ENABLED 3391 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",3392 p IemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,3391 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx", 3392 pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip, 3393 3393 uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp); 3394 3394 #endif 3395 3395 3396 IEMMISC_SET_EFL(p IemCpu, pCtx, NewEfl.u);3396 IEMMISC_SET_EFL(pVCpu, pCtx, NewEfl.u); 3397 3397 pCtx->rip = uNewEip; 3398 3398 pCtx->cs.Sel = uNewCs; … … 3402 3402 pCtx->cs.u32Limit = cbLimitCS; 3403 3403 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 3404 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);3404 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 3405 3405 pCtx->rsp = uNewRsp; 3406 3406 /* Done! */ … … 3408 3408 3409 3409 /* Flush the prefetch buffer. 
*/ 3410 p IemCpu->cbOpcode = pIemCpu->offOpcode;3410 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3411 3411 3412 3412 return VINF_SUCCESS; … … 3421 3421 IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize) 3422 3422 { 3423 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);3423 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 3424 3424 NOREF(cbInstr); 3425 3425 … … 3430 3430 { 3431 3431 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pCtx->eflags.u)); 3432 return iemRaiseGeneralProtectionFault0(p IemCpu);3432 return iemRaiseGeneralProtectionFault0(pVCpu); 3433 3433 } 3434 3434 … … 3448 3448 if (enmEffOpSize == IEMMODE_64BIT) 3449 3449 { 3450 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 5*8, &uFrame.pv, &uNewRsp);3450 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp); 3451 3451 if (rcStrict != VINF_SUCCESS) 3452 3452 return rcStrict; … … 3459 3459 else if (enmEffOpSize == IEMMODE_32BIT) 3460 3460 { 3461 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 5*4, &uFrame.pv, &uNewRsp);3461 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp); 3462 3462 if (rcStrict != VINF_SUCCESS) 3463 3463 return rcStrict; … … 3471 3471 { 3472 3472 Assert(enmEffOpSize == IEMMODE_16BIT); 3473 rcStrict = iemMemStackPopBeginSpecial(p IemCpu, 5*2, &uFrame.pv, &uNewRsp);3473 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp); 3474 3474 if (rcStrict != VINF_SUCCESS) 3475 3475 return rcStrict; … … 3480 3480 uNewSs = uFrame.pu16[4]; 3481 3481 } 3482 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */3482 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */ 3483 3483 if (rcStrict != VINF_SUCCESS) 3484 3484 return rcStrict; … … 3492 3492 { 3493 3493 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 3494 return iemRaiseGeneralProtectionFault0(p IemCpu);3494 return iemRaiseGeneralProtectionFault0(pVCpu); 3495 3495 } 3496 3496 3497 3497 IEMSELDESC DescCS; 3498 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescCS, uNewCs, X86_XCPT_GP);3498 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP); 3499 3499 if (rcStrict != VINF_SUCCESS) 3500 3500 { … … 3510 3510 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n", 3511 3511 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type)); 3512 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3512 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3513 3513 } 3514 3514 … … 3520 3520 { 3521 3521 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl)); 3522 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3522 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3523 3523 } 3524 3524 } … … 3526 3526 { 3527 3527 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl)); 3528 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3529 } 3530 if ((uNewCs & X86_SEL_RPL) < p IemCpu->uCpl)3531 { 3532 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, p IemCpu->uCpl));3533 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewCs);3528 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3529 } 3530 if ((uNewCs & X86_SEL_RPL) < pVCpu->iem.s.uCpl) 3531 { 3532 
Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, pVCpu->iem.s.uCpl)); 3533 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 3534 3534 } 3535 3535 … … 3538 3538 { 3539 3539 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 3540 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewCs);3540 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs); 3541 3541 } 3542 3542 … … 3552 3552 { 3553 3553 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 3554 return iemRaiseGeneralProtectionFault0(p IemCpu);3554 return iemRaiseGeneralProtectionFault0(pVCpu); 3555 3555 } 3556 3556 DescSS.Legacy.u = 0; … … 3558 3558 else 3559 3559 { 3560 rcStrict = iemMemFetchSelDesc(p IemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */3560 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */ 3561 3561 if (rcStrict != VINF_SUCCESS) 3562 3562 { … … 3571 3571 { 3572 3572 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 3573 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSs);3573 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs); 3574 3574 } 3575 3575 … … 3583 3583 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n", 3584 3584 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl)); 3585 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSs);3585 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs); 3586 3586 } 3587 3587 … … 3591 3591 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n", 3592 3592 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); 3593 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSs);3593 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs); 3594 3594 } 3595 3595 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE) … … 3597 3597 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n", 3598 3598 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type)); 3599 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewSs);3599 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs); 3600 3600 } 3601 3601 … … 3604 3604 { 3605 3605 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp)); 3606 return iemRaiseStackSelectorNotPresentBySelector(p IemCpu, uNewSs);3606 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs); 3607 3607 } 3608 3608 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy); … … 3616 3616 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n", 3617 3617 uNewCs, uNewRip, uNewSs, uNewRsp)); 3618 return iemRaiseSelectorBoundsBySelector(p IemCpu, uNewCs);3618 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3619 3619 } 3620 3620 } … … 3626 3626 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS)); 3627 3627 /** @todo: Which is it, #GP(0) or #GP(sel)? 
*/ 3628 return iemRaiseSelectorBoundsBySelector(p IemCpu, uNewCs);3628 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3629 3629 } 3630 3630 } … … 3637 3637 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3638 3638 { 3639 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewCs);3639 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs); 3640 3640 if (rcStrict != VINF_SUCCESS) 3641 3641 return rcStrict; … … 3644 3644 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 3645 3645 { 3646 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uNewSs);3646 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs); 3647 3647 if (rcStrict != VINF_SUCCESS) 3648 3648 return rcStrict; … … 3654 3654 if (enmEffOpSize != IEMMODE_16BIT) 3655 3655 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID; 3656 if (p IemCpu->uCpl == 0)3656 if (pVCpu->iem.s.uCpl == 0) 3657 3657 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */ 3658 else if (p IemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)3658 else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL) 3659 3659 fEFlagsMask |= X86_EFL_IF; 3660 uint32_t fEFlagsNew = IEMMISC_GET_EFL(p IemCpu, pCtx);3660 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx); 3661 3661 fEFlagsNew &= ~fEFlagsMask; 3662 3662 fEFlagsNew |= uNewFlags & fEFlagsMask; 3663 3663 #ifdef DBGFTRACE_ENABLED 3664 RTTraceBufAddMsgF( IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",3665 p IemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);3664 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx", 3665 pVCpu->iem.s.uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp); 3666 3666 #endif 3667 3667 3668 IEMMISC_SET_EFL(p IemCpu, pCtx, fEFlagsNew);3668 IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew); 3669 3669 pCtx->rip = uNewRip; 3670 3670 pCtx->cs.Sel = uNewCs; … … 3674 3674 pCtx->cs.u32Limit = cbLimitCS; 3675 3675 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 3676 p IemCpu->enmCpuMode = iemCalcCpuMode(pCtx);3676 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx); 3677 3677 if (pCtx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig) 3678 3678 pCtx->rsp = uNewRsp; … … 3698 3698 } 3699 3699 3700 if (p IemCpu->uCpl != uNewCpl)3701 { 3702 p IemCpu->uCpl = uNewCpl;3703 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCpl, &pCtx->ds);3704 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCpl, &pCtx->es);3705 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCpl, &pCtx->fs);3706 iemHlpAdjustSelectorForNewCpl(p IemCpu, uNewCpl, &pCtx->gs);3700 if (pVCpu->iem.s.uCpl != uNewCpl) 3701 { 3702 pVCpu->iem.s.uCpl = uNewCpl; 3703 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->ds); 3704 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->es); 3705 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->fs); 3706 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pCtx->gs); 3707 3707 } 3708 3708 3709 3709 /* Flush the prefetch buffer. */ 3710 p IemCpu->cbOpcode = pIemCpu->offOpcode;3710 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3711 3711 3712 3712 return VINF_SUCCESS; … … 3724 3724 * First, clear NMI blocking, if any, before causing any exceptions. 3725 3725 */ 3726 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);3727 3726 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 3728 3727 … … 3730 3729 * Call a mode specific worker. 
3731 3730 */ 3732 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))3731 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 3733 3732 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize); 3734 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)3733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 3735 3734 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize); 3736 3735 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize); … … 3745 3744 IEM_CIMPL_DEF_0(iemCImpl_syscall) 3746 3745 { 3747 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);3746 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 3748 3747 3749 3748 /* … … 3757 3756 { 3758 3757 Log(("syscall: Not enabled in EFER -> #UD\n")); 3759 return iemRaiseUndefinedOpcode(p IemCpu);3758 return iemRaiseUndefinedOpcode(pVCpu); 3760 3759 } 3761 3760 if (!(pCtx->cr0 & X86_CR0_PE)) 3762 3761 { 3763 3762 Log(("syscall: Protected mode is required -> #GP(0)\n")); 3764 return iemRaiseGeneralProtectionFault0(p IemCpu);3765 } 3766 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !CPUMIsGuestInLongModeEx(pCtx))3763 return iemRaiseGeneralProtectionFault0(pVCpu); 3764 } 3765 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx)) 3767 3766 { 3768 3767 Log(("syscall: Only available in long mode on intel -> #UD\n")); 3769 return iemRaiseUndefinedOpcode(p IemCpu);3768 return iemRaiseUndefinedOpcode(pVCpu); 3770 3769 } 3771 3770 … … 3777 3776 { 3778 3777 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n")); 3779 return iemRaiseGeneralProtectionFault0(p IemCpu);3778 return iemRaiseGeneralProtectionFault0(pVCpu); 3780 3779 } 3781 3780 … … 3783 3782 if (CPUMIsGuestInLongModeEx(pCtx)) 3784 3783 { 3785 uint64_t uNewRip = p IemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx-> msrCSTAR;3784 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->msrLSTAR : pCtx-> msrCSTAR; 3786 3785 3787 3786 /* This test isn't in the docs, but I'm not trusting the guys writing … … 3790 3789 { 3791 3790 Log(("syscall: Only available in long mode on intel -> #UD\n")); 3792 return iemRaiseUndefinedOpcode(p IemCpu);3791 return iemRaiseUndefinedOpcode(pVCpu); 3793 3792 } 3794 3793 … … 3835 3834 3836 3835 /* Flush the prefetch buffer. 
*/ 3837 p IemCpu->cbOpcode = pIemCpu->offOpcode;3836 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3838 3837 3839 3838 return VINF_SUCCESS; … … 3847 3846 3848 3847 { 3849 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);3848 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 3850 3849 3851 3850 /* … … 3859 3858 { 3860 3859 Log(("sysret: Not enabled in EFER -> #UD\n")); 3861 return iemRaiseUndefinedOpcode(p IemCpu);3862 } 3863 if (IEM_IS_GUEST_CPU_INTEL(p IemCpu) && !CPUMIsGuestInLongModeEx(pCtx))3860 return iemRaiseUndefinedOpcode(pVCpu); 3861 } 3862 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(pCtx)) 3864 3863 { 3865 3864 Log(("sysret: Only available in long mode on intel -> #UD\n")); 3866 return iemRaiseUndefinedOpcode(p IemCpu);3865 return iemRaiseUndefinedOpcode(pVCpu); 3867 3866 } 3868 3867 if (!(pCtx->cr0 & X86_CR0_PE)) 3869 3868 { 3870 3869 Log(("sysret: Protected mode is required -> #GP(0)\n")); 3871 return iemRaiseGeneralProtectionFault0(p IemCpu);3872 } 3873 if (p IemCpu->uCpl != 0)3874 { 3875 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", p IemCpu->uCpl));3876 return iemRaiseGeneralProtectionFault0(p IemCpu);3870 return iemRaiseGeneralProtectionFault0(pVCpu); 3871 } 3872 if (pVCpu->iem.s.uCpl != 0) 3873 { 3874 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 3875 return iemRaiseGeneralProtectionFault0(pVCpu); 3877 3876 } 3878 3877 … … 3880 3879 uint16_t uNewCs = (pCtx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL; 3881 3880 uint16_t uNewSs = uNewCs + 8; 3882 if (p IemCpu->enmEffOpSize == IEMMODE_64BIT)3881 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT) 3883 3882 uNewCs += 16; 3884 3883 if (uNewCs == 0 || uNewSs == 0) 3885 3884 { 3886 3885 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n")); 3887 return iemRaiseGeneralProtectionFault0(p IemCpu);3886 return iemRaiseGeneralProtectionFault0(pVCpu); 3888 3887 } 3889 3888 … … 3893 3892 if (CPUMIsGuestInLongModeEx(pCtx)) 3894 3893 { 3895 if (p IemCpu->enmEffOpSize == IEMMODE_64BIT)3894 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT) 3896 3895 { 3897 3896 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", … … 3939 3938 3940 3939 /* Flush the prefetch buffer. */ 3941 p IemCpu->cbOpcode = pIemCpu->offOpcode;3940 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode; 3942 3941 3943 3942 return VINF_SUCCESS; … … 3953 3952 IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel) 3954 3953 { 3955 /*PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);*/3956 uint16_t *pSel = iemSRegRef(p IemCpu, iSegReg);3957 PCPUMSELREGHID pHid = iemSRegGetHid(p IemCpu, iSegReg);3954 /*PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);*/ 3955 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg); 3956 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg); 3958 3957 3959 3958 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS); … … 3962 3961 * Real mode and V8086 mode are easy. 
3963 3962 */ 3964 if ( p IemCpu->enmCpuMode == IEMMODE_16BIT3965 && IEM_IS_REAL_OR_V86_MODE(p IemCpu))3963 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT 3964 && IEM_IS_REAL_OR_V86_MODE(pVCpu)) 3966 3965 { 3967 3966 *pSel = uSel; … … 3981 3980 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE; 3982 3981 #endif 3983 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);3984 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);3982 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 3983 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 3985 3984 return VINF_SUCCESS; 3986 3985 } … … 4001 4000 relaxed relationship to SS.RPL than intel does. */ 4002 4001 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */ 4003 if ( p IemCpu->enmCpuMode != IEMMODE_64BIT4004 || p IemCpu->uCpl > 24005 || ( uSel != p IemCpu->uCpl4006 && !IEM_IS_GUEST_CPU_AMD(p IemCpu)) )4002 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 4003 || pVCpu->iem.s.uCpl > 2 4004 || ( uSel != pVCpu->iem.s.uCpl 4005 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) ) 4007 4006 { 4008 4007 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel)); 4009 return iemRaiseGeneralProtectionFault0(p IemCpu);4008 return iemRaiseGeneralProtectionFault0(pVCpu); 4010 4009 } 4011 4010 } 4012 4011 4013 4012 *pSel = uSel; /* Not RPL, remember :-) */ 4014 iemHlpLoadNullDataSelectorProt(p IemCpu, pHid, uSel);4013 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel); 4015 4014 if (iSegReg == X86_SREG_SS) 4016 pHid->Attr.u |= p IemCpu->uCpl << X86DESCATTR_DPL_SHIFT;4017 4018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pHid));4019 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);4020 4021 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4015 pHid->Attr.u |= pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT; 4016 4017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid)); 4018 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 4019 4020 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4022 4021 return VINF_SUCCESS; 4023 4022 } … … 4025 4024 /* Fetch the descriptor. */ 4026 4025 IEMSELDESC Desc; 4027 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */4026 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? 
*/ 4028 4027 if (rcStrict != VINF_SUCCESS) 4029 4028 return rcStrict; … … 4033 4032 { 4034 4033 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type)); 4035 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4034 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4036 4035 } 4037 4036 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */ … … 4041 4040 { 4042 4041 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type)); 4043 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4044 } 4045 if ((uSel & X86_SEL_RPL) != p IemCpu->uCpl)4046 { 4047 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, p IemCpu->uCpl));4048 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4049 } 4050 if (Desc.Legacy.Gen.u2Dpl != p IemCpu->uCpl)4051 { 4052 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, p IemCpu->uCpl));4053 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4042 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4043 } 4044 if ((uSel & X86_SEL_RPL) != pVCpu->iem.s.uCpl) 4045 { 4046 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pVCpu->iem.s.uCpl)); 4047 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4048 } 4049 if (Desc.Legacy.Gen.u2Dpl != pVCpu->iem.s.uCpl) 4050 { 4051 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl)); 4052 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4054 4053 } 4055 4054 } … … 4059 4058 { 4060 4059 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel)); 4061 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4060 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4062 4061 } 4063 4062 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) … … 4066 4065 #if 0 /* this is what intel says. */ 4067 4066 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl 4068 && p IemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)4067 && pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl) 4069 4068 { 4070 4069 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n", 4071 iSegReg, uSel, (uSel & X86_SEL_RPL), p IemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));4072 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4070 iSegReg, uSel, (uSel & X86_SEL_RPL), pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl)); 4071 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4073 4072 } 4074 4073 #else /* this is what makes more sense. 
*/ … … 4077 4076 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n", 4078 4077 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl)); 4079 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4078 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4080 4079 } 4081 if (p IemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)4080 if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl) 4082 4081 { 4083 4082 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n", 4084 iSegReg, uSel, p IemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));4085 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uSel);4083 iSegReg, uSel, pVCpu->iem.s.uCpl, Desc.Legacy.Gen.u2Dpl)); 4084 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 4086 4085 } 4087 4086 #endif … … 4093 4092 { 4094 4093 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel)); 4095 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uSel);4094 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel); 4096 4095 } 4097 4096 … … 4106 4105 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED)) 4107 4106 { 4108 rcStrict = iemMemMarkSelDescAccessed(p IemCpu, uSel);4107 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel); 4109 4108 if (rcStrict != VINF_SUCCESS) 4110 4109 return rcStrict; … … 4122 4121 /** @todo check if the hidden bits are loaded correctly for 64-bit 4123 4122 * mode. */ 4124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID( IEMCPU_TO_VMCPU(pIemCpu), pHid));4125 4126 CPUMSetChangedFlags( IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);4127 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid)); 4124 4125 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); 4126 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4128 4127 return VINF_SUCCESS; 4129 4128 } … … 4143 4142 if (iSegReg == X86_SREG_SS) 4144 4143 { 4145 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4146 EMSetInhibitInterruptsPC( IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);4144 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4145 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 4147 4146 } 4148 4147 } … … 4159 4158 IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize) 4160 4159 { 4161 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4160 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4162 4161 VBOXSTRICTRC rcStrict; 4163 4162 … … 4172 4171 { 4173 4172 uint16_t uSel; 4174 rcStrict = iemMemStackPopU16Ex(p IemCpu, &uSel, &TmpRsp);4173 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp); 4175 4174 if (rcStrict == VINF_SUCCESS) 4176 4175 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel); … … 4181 4180 { 4182 4181 uint32_t u32Value; 4183 rcStrict = iemMemStackPopU32Ex(p IemCpu, &u32Value, &TmpRsp);4182 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp); 4184 4183 if (rcStrict == VINF_SUCCESS) 4185 4184 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value); … … 4190 4189 { 4191 4190 uint64_t u64Value; 4192 rcStrict = iemMemStackPopU64Ex(p IemCpu, &u64Value, &TmpRsp);4191 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp); 4193 4192 if (rcStrict == VINF_SUCCESS) 4194 4193 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value); … … 4205 4204 pCtx->rsp = TmpRsp.u; 4206 4205 if (iSegReg == X86_SREG_SS) 4207 EMSetInhibitInterruptsPC( IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);4206 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 4208 4207 } 4209 4208 return rcStrict; … … 4221 4220 IEMMODE, enmEffOpSize) 4222 4221 { 4223 /*PCPUMCTX pCtx = p 
IemCpu->CTX_SUFF(pCtx);*/4222 /*PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);*/ 4224 4223 VBOXSTRICTRC rcStrict; 4225 4224 … … 4235 4234 { 4236 4235 case IEMMODE_16BIT: 4237 *(uint16_t *)iemGRegRef(p IemCpu, iGReg) = offSeg;4236 *(uint16_t *)iemGRegRef(pVCpu, iGReg) = offSeg; 4238 4237 break; 4239 4238 case IEMMODE_32BIT: 4240 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = offSeg;4239 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg; 4241 4240 break; 4242 4241 case IEMMODE_64BIT: 4243 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = offSeg;4242 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = offSeg; 4244 4243 break; 4245 4244 IEM_NOT_REACHED_DEFAULT_CASE_RET(); … … 4258 4257 * @retval iemMemFetchSysU64 return value. 4259 4258 * 4260 * @param p IemCpu The IEM state of the calling EMT.4259 * @param pVCpu The cross context virtual CPU structure of the calling thread. 4261 4260 * @param uSel The selector value. 4262 4261 * @param fAllowSysDesc Whether system descriptors are OK or not. 4263 4262 * @param pDesc Where to return the descriptor on success. 4264 4263 */ 4265 static VBOXSTRICTRC iemCImpl_LoadDescHelper(P IEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)4264 static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPU pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc) 4266 4265 { 4267 4266 pDesc->Long.au64[0] = 0; … … 4272 4271 4273 4272 /* Within the table limits? */ 4274 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4273 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4275 4274 RTGCPTR GCPtrBase; 4276 4275 if (uSel & X86_SEL_LDT) … … 4289 4288 4290 4289 /* Fetch the descriptor. */ 4291 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(p IemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));4290 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK)); 4292 4291 if (rcStrict != VINF_SUCCESS) 4293 4292 return rcStrict; … … 4298 4297 if (CPUMIsGuestInLongModeEx(pCtx)) 4299 4298 { 4300 rcStrict = iemMemFetchSysU64(p IemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);4299 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); 4301 4300 if (rcStrict != VINF_SUCCESS) 4302 4301 return rcStrict; … … 4314 4313 IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite) 4315 4314 { 4316 Assert(!IEM_IS_REAL_OR_V86_MODE(p IemCpu));4315 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu)); 4317 4316 4318 4317 /** @todo figure whether the accessed bit is set or not. 
*/ … … 4320 4319 bool fAccessible = true; 4321 4320 IEMSELDESC Desc; 4322 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(p IemCpu, uSel, false /*fAllowSysDesc*/, &Desc);4321 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc); 4323 4322 if (rcStrict == VINF_SUCCESS) 4324 4323 { … … 4340 4339 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl) 4341 4340 fAccessible = false; 4342 else if (p IemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)4341 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl) 4343 4342 fAccessible = false; 4344 4343 } … … 4352 4351 4353 4352 /* commit */ 4354 p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;4355 4356 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4353 pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible; 4354 4355 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4357 4356 return VINF_SUCCESS; 4358 4357 } … … 4370 4369 IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar) 4371 4370 { 4372 Assert(!IEM_IS_REAL_OR_V86_MODE(p IemCpu));4371 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu)); 4373 4372 4374 4373 /** @todo figure whether the accessed bit is set or not. */ … … 4376 4375 bool fDescOk = true; 4377 4376 IEMSELDESC Desc; 4378 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(p IemCpu, uSel, false /*fAllowSysDesc*/, &Desc);4377 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc); 4379 4378 if (rcStrict == VINF_SUCCESS) 4380 4379 { … … 4384 4383 if (!Desc.Legacy.Gen.u1DescType) 4385 4384 { 4386 if (CPUMIsGuestInLongModeEx(p IemCpu->CTX_SUFF(pCtx)))4385 if (CPUMIsGuestInLongModeEx(pVCpu->iem.s.CTX_SUFF(pCtx))) 4387 4386 { 4388 4387 if (Desc.Long.Gen.u5Zeros) … … 4436 4435 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl) 4437 4436 fDescOk = false; 4438 else if (p IemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)4437 else if (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl) 4439 4438 fDescOk = false; 4440 4439 } … … 4459 4458 4460 4459 /* commit flags value and advance rip. 
*/ 4461 p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;4462 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4460 pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk; 4461 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4463 4462 4464 4463 return VINF_SUCCESS; … … 4493 4492 IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize) 4494 4493 { 4495 if (p IemCpu->uCpl != 0)4496 return iemRaiseGeneralProtectionFault0(p IemCpu);4497 Assert(!p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);4494 if (pVCpu->iem.s.uCpl != 0) 4495 return iemRaiseGeneralProtectionFault0(pVCpu); 4496 Assert(!pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1VM); 4498 4497 4499 4498 /* … … 4502 4501 uint16_t cbLimit; 4503 4502 RTGCPTR GCPtrBase; 4504 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(p IemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);4503 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize); 4505 4504 if (rcStrict == VINF_SUCCESS) 4506 4505 { 4507 if ( p IemCpu->enmCpuMode != IEMMODE_64BIT4506 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 4508 4507 || X86_IS_CANONICAL(GCPtrBase)) 4509 4508 { 4510 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))4511 rcStrict = CPUMSetGuestGDTR( IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);4509 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4510 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); 4512 4511 else 4513 4512 { 4514 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4513 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4515 4514 pCtx->gdtr.cbGdt = cbLimit; 4516 4515 pCtx->gdtr.pGdt = GCPtrBase; 4517 4516 } 4518 4517 if (rcStrict == VINF_SUCCESS) 4519 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4518 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4520 4519 } 4521 4520 else 4522 4521 { 4523 4522 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase)); 4524 return iemRaiseGeneralProtectionFault0(p IemCpu);4523 return iemRaiseGeneralProtectionFault0(pVCpu); 4525 4524 } 4526 4525 } … … 4542 4541 * you really must know. 
4543 4542 */ 4544 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4545 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(p IemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);4543 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4544 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst); 4546 4545 if (rcStrict == VINF_SUCCESS) 4547 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4546 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4548 4547 return rcStrict; 4549 4548 } … … 4559 4558 IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize) 4560 4559 { 4561 if (p IemCpu->uCpl != 0)4562 return iemRaiseGeneralProtectionFault0(p IemCpu);4563 Assert(!p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);4560 if (pVCpu->iem.s.uCpl != 0) 4561 return iemRaiseGeneralProtectionFault0(pVCpu); 4562 Assert(!pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1VM); 4564 4563 4565 4564 /* … … 4568 4567 uint16_t cbLimit; 4569 4568 RTGCPTR GCPtrBase; 4570 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(p IemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);4569 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize); 4571 4570 if (rcStrict == VINF_SUCCESS) 4572 4571 { 4573 if ( p IemCpu->enmCpuMode != IEMMODE_64BIT4572 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 4574 4573 || X86_IS_CANONICAL(GCPtrBase)) 4575 4574 { 4576 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))4577 CPUMSetGuestIDTR( IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);4575 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4576 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit); 4578 4577 else 4579 4578 { 4580 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4579 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4581 4580 pCtx->idtr.cbIdt = cbLimit; 4582 4581 pCtx->idtr.pIdt = GCPtrBase; 4583 4582 } 4584 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4583 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4585 4584 } 4586 4585 else 4587 4586 { 4588 4587 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase)); 4589 return iemRaiseGeneralProtectionFault0(p IemCpu);4588 return iemRaiseGeneralProtectionFault0(pVCpu); 4590 4589 } 4591 4590 } … … 4607 4606 * you really must know. 4608 4607 */ 4609 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4610 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(p IemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst);4608 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4609 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst); 4611 4610 if (rcStrict == VINF_SUCCESS) 4612 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4611 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4613 4612 return rcStrict; 4614 4613 } … … 4622 4621 IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt) 4623 4622 { 4624 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4623 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4625 4624 4626 4625 /* 4627 4626 * Check preconditions. 
4628 4627 */ 4629 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))4628 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 4630 4629 { 4631 4630 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt)); 4632 return iemRaiseUndefinedOpcode(p IemCpu);4633 } 4634 if (p IemCpu->uCpl != 0)4635 { 4636 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, p IemCpu->uCpl));4637 return iemRaiseGeneralProtectionFault0(p IemCpu);4631 return iemRaiseUndefinedOpcode(pVCpu); 4632 } 4633 if (pVCpu->iem.s.uCpl != 0) 4634 { 4635 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pVCpu->iem.s.uCpl)); 4636 return iemRaiseGeneralProtectionFault0(pVCpu); 4638 4637 } 4639 4638 if (uNewLdt & X86_SEL_LDT) 4640 4639 { 4641 4640 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt)); 4642 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewLdt);4641 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt); 4643 4642 } 4644 4643 … … 4649 4648 { 4650 4649 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt)); 4651 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))4652 CPUMSetGuestLDTR( IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);4650 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4651 CPUMSetGuestLDTR(pVCpu, uNewLdt); 4653 4652 else 4654 4653 pCtx->ldtr.Sel = uNewLdt; 4655 4654 pCtx->ldtr.ValidSel = uNewLdt; 4656 4655 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 4657 if (IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))4656 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 4658 4657 { 4659 4658 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; 4660 4659 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verfication against REM. */ 4661 4660 } 4662 else if (IEM_IS_GUEST_CPU_AMD(p IemCpu))4661 else if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 4663 4662 { 4664 4663 /* AMD-V seems to leave the base and limit alone. */ 4665 4664 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; 4666 4665 } 4667 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))4666 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 4668 4667 { 4669 4668 /* VT-x (Intel 3960x) seems to be doing the following. */ … … 4673 4672 } 4674 4673 4675 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4674 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4676 4675 return VINF_SUCCESS; 4677 4676 } … … 4681 4680 */ 4682 4681 IEMSELDESC Desc; 4683 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */4682 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? 
*/ 4684 4683 if (rcStrict != VINF_SUCCESS) 4685 4684 return rcStrict; … … 4689 4688 { 4690 4689 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type)); 4691 return iemRaiseGeneralProtectionFault(p IemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);4690 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4692 4691 } 4693 4692 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT) 4694 4693 { 4695 4694 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type)); 4696 return iemRaiseGeneralProtectionFault(p IemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);4695 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4697 4696 } 4698 4697 uint64_t u64Base; 4699 if (!IEM_IS_LONG_MODE(p IemCpu))4698 if (!IEM_IS_LONG_MODE(pVCpu)) 4700 4699 u64Base = X86DESC_BASE(&Desc.Legacy); 4701 4700 else … … 4704 4703 { 4705 4704 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros)); 4706 return iemRaiseGeneralProtectionFault(p IemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);4705 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4707 4706 } 4708 4707 … … 4711 4710 { 4712 4711 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base)); 4713 return iemRaiseGeneralProtectionFault(p IemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);4712 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4714 4713 } 4715 4714 } … … 4719 4718 { 4720 4719 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt)); 4721 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewLdt);4720 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt); 4722 4721 } 4723 4722 … … 4726 4725 */ 4727 4726 /** @todo check if the actual value is loaded or if the RPL is dropped */ 4728 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))4729 CPUMSetGuestLDTR( IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);4727 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4728 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4730 4729 else 4731 4730 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL; … … 4736 4735 pCtx->ldtr.u64Base = u64Base; 4737 4736 4738 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4737 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4739 4738 return VINF_SUCCESS; 4740 4739 } … … 4748 4747 IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr) 4749 4748 { 4750 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4749 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4751 4750 4752 4751 /* 4753 4752 * Check preconditions. 
4754 4753 */ 4755 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))4754 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 4756 4755 { 4757 4756 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr)); 4758 return iemRaiseUndefinedOpcode(p IemCpu);4759 } 4760 if (p IemCpu->uCpl != 0)4761 { 4762 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, p IemCpu->uCpl));4763 return iemRaiseGeneralProtectionFault0(p IemCpu);4757 return iemRaiseUndefinedOpcode(pVCpu); 4758 } 4759 if (pVCpu->iem.s.uCpl != 0) 4760 { 4761 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pVCpu->iem.s.uCpl)); 4762 return iemRaiseGeneralProtectionFault0(pVCpu); 4764 4763 } 4765 4764 if (uNewTr & X86_SEL_LDT) 4766 4765 { 4767 4766 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr)); 4768 return iemRaiseGeneralProtectionFaultBySelector(p IemCpu, uNewTr);4767 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr); 4769 4768 } 4770 4769 if (!(uNewTr & X86_SEL_MASK_OFF_RPL)) 4771 4770 { 4772 4771 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr)); 4773 return iemRaiseGeneralProtectionFault0(p IemCpu);4772 return iemRaiseGeneralProtectionFault0(pVCpu); 4774 4773 } 4775 4774 … … 4778 4777 */ 4779 4778 IEMSELDESC Desc; 4780 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(p IemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */4779 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */ 4781 4780 if (rcStrict != VINF_SUCCESS) 4782 4781 return rcStrict; … … 4786 4785 { 4787 4786 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type)); 4788 return iemRaiseGeneralProtectionFault(p IemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);4787 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 4789 4788 } 4790 4789 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */ 4791 4790 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL 4792 || IEM_IS_LONG_MODE(p IemCpu)) )4791 || IEM_IS_LONG_MODE(pVCpu)) ) 4793 4792 { 4794 4793 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type)); 4795 return iemRaiseGeneralProtectionFault(p IemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);4794 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 4796 4795 } 4797 4796 uint64_t u64Base; 4798 if (!IEM_IS_LONG_MODE(p IemCpu))4797 if (!IEM_IS_LONG_MODE(pVCpu)) 4799 4798 u64Base = X86DESC_BASE(&Desc.Legacy); 4800 4799 else … … 4803 4802 { 4804 4803 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros)); 4805 return iemRaiseGeneralProtectionFault(p IemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);4804 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 4806 4805 } 4807 4806 … … 4810 4809 { 4811 4810 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base)); 4812 return iemRaiseGeneralProtectionFault(p IemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);4811 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 4813 4812 } 4814 4813 } … … 4818 4817 { 4819 4818 Log(("ltr %#x - segment not present -> #NP\n", uNewTr)); 4820 return iemRaiseSelectorNotPresentBySelector(p IemCpu, uNewTr);4819 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr); 4821 4820 } 4822 4821 … … 4828 4827 */ 4829 4828 void *pvDesc; 4830 rcStrict = iemMemMap(p IemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);4829 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, 
pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW); 4831 4830 if (rcStrict != VINF_SUCCESS) 4832 4831 return rcStrict; … … 4838 4837 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break; 4839 4838 } 4840 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvDesc, IEM_ACCESS_DATA_RW);4839 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW); 4841 4840 if (rcStrict != VINF_SUCCESS) 4842 4841 return rcStrict; … … 4847 4846 */ 4848 4847 /** @todo check if the actual value is loaded or if the RPL is dropped */ 4849 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))4850 CPUMSetGuestTR( IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);4848 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4849 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 4851 4850 else 4852 4851 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL; … … 4857 4856 pCtx->tr.u64Base = u64Base; 4858 4857 4859 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4858 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4860 4859 return VINF_SUCCESS; 4861 4860 } … … 4870 4869 IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg) 4871 4870 { 4872 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);4873 if (p IemCpu->uCpl != 0)4874 return iemRaiseGeneralProtectionFault0(p IemCpu);4871 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4872 if (pVCpu->iem.s.uCpl != 0) 4873 return iemRaiseGeneralProtectionFault0(pVCpu); 4875 4874 Assert(!pCtx->eflags.Bits.u1VM); 4876 4875 … … 4881 4880 case 0: 4882 4881 crX = pCtx->cr0; 4883 if (IEM_GET_TARGET_CPU(p IemCpu) <= IEMTARGETCPU_386)4882 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386) 4884 4883 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */ 4885 4884 break; … … 4890 4889 { 4891 4890 uint8_t uTpr; 4892 int rc = PDMApicGetTPR( IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);4891 int rc = PDMApicGetTPR(pVCpu, &uTpr, NULL, NULL); 4893 4892 if (RT_SUCCESS(rc)) 4894 4893 crX = uTpr >> 4; … … 4901 4900 4902 4901 /* store it */ 4903 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)4904 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = crX;4902 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 4903 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = crX; 4905 4904 else 4906 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = (uint32_t)crX;4907 4908 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);4905 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)crX; 4906 4907 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4909 4908 return VINF_SUCCESS; 4910 4909 } … … 4919 4918 IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX) 4920 4919 { 4921 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 4922 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 4920 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 4923 4921 VBOXSTRICTRC rcStrict; 4924 4922 int rc; … … 4941 4939 4942 4940 /* ET is hardcoded on 486 and later. */ 4943 if (IEM_GET_TARGET_CPU(p IemCpu) > IEMTARGETCPU_486)4941 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486) 4944 4942 uNewCrX |= X86_CR0_ET; 4945 4943 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. 
*/ 4946 else if (IEM_GET_TARGET_CPU(p IemCpu) == IEMTARGETCPU_486)4944 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486) 4947 4945 { 4948 4946 uNewCrX &= fValid; … … 4956 4954 { 4957 4955 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid)); 4958 return iemRaiseGeneralProtectionFault0(p IemCpu);4956 return iemRaiseGeneralProtectionFault0(pVCpu); 4959 4957 } 4960 4958 … … 4964 4962 { 4965 4963 Log(("Trying to set CR0.PG without CR0.PE\n")); 4966 return iemRaiseGeneralProtectionFault0(p IemCpu);4964 return iemRaiseGeneralProtectionFault0(pVCpu); 4967 4965 } 4968 4966 … … 4971 4969 { 4972 4970 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n")); 4973 return iemRaiseGeneralProtectionFault0(p IemCpu);4971 return iemRaiseGeneralProtectionFault0(pVCpu); 4974 4972 } 4975 4973 … … 4982 4980 { 4983 4981 Log(("Trying to enabled long mode paging without CR4.PAE set\n")); 4984 return iemRaiseGeneralProtectionFault0(p IemCpu);4982 return iemRaiseGeneralProtectionFault0(pVCpu); 4985 4983 } 4986 4984 if (pCtx->cs.Attr.n.u1Long) 4987 4985 { 4988 4986 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n")); 4989 return iemRaiseGeneralProtectionFault0(p IemCpu);4987 return iemRaiseGeneralProtectionFault0(pVCpu); 4990 4988 } 4991 4989 } … … 4996 4994 * Change CR0. 4997 4995 */ 4998 if (!IEM_VERIFICATION_ENABLED(p IemCpu))4996 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 4999 4997 CPUMSetGuestCR0(pVCpu, uNewCrX); 5000 4998 else … … 5014 5012 NewEFER &= ~MSR_K6_EFER_LMA; 5015 5013 5016 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5014 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5017 5015 CPUMSetGuestEFER(pVCpu, NewEFER); 5018 5016 else … … 5024 5022 * Inform PGM. 5025 5023 */ 5026 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5024 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5027 5025 { 5028 5026 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) … … 5071 5069 { 5072 5070 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX)); 5073 return iemRaiseGeneralProtectionFault0(p IemCpu);5071 return iemRaiseGeneralProtectionFault0(pVCpu); 5074 5072 } 5075 5073 … … 5091 5089 5092 5090 /* Make the change. */ 5093 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5091 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5094 5092 { 5095 5093 rc = CPUMSetGuestCR3(pVCpu, uNewCrX); … … 5100 5098 5101 5099 /* Inform PGM. */ 5102 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5100 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5103 5101 { 5104 5102 if (pCtx->cr0 & X86_CR0_PG) … … 5133 5131 //if (xxx) 5134 5132 // fValid |= X86_CR4_VMXE; 5135 if (IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fXSaveRstor)5133 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor) 5136 5134 fValid |= X86_CR4_OSXSAVE; 5137 5135 if (uNewCrX & ~(uint64_t)fValid) 5138 5136 { 5139 5137 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid)); 5140 return iemRaiseGeneralProtectionFault0(p IemCpu);5138 return iemRaiseGeneralProtectionFault0(pVCpu); 5141 5139 } 5142 5140 … … 5147 5145 { 5148 5146 Log(("Trying to set clear CR4.PAE while long mode is active\n")); 5149 return iemRaiseGeneralProtectionFault0(p IemCpu);5147 return iemRaiseGeneralProtectionFault0(pVCpu); 5150 5148 } 5151 5149 … … 5154 5152 * Change it. 5155 5153 */ 5156 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5154 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5157 5155 { 5158 5156 rc = CPUMSetGuestCR4(pVCpu, uNewCrX); … … 5166 5164 * Notify SELM and PGM. 
5167 5165 */ 5168 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5166 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5169 5167 { 5170 5168 /* SELM - VME may change things wrt to the TSS shadowing. */ … … 5174 5172 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) )); 5175 5173 #ifdef VBOX_WITH_RAW_MODE 5176 if (!HMIsEnabled( IEMCPU_TO_VM(pIemCpu)))5174 if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM))) 5177 5175 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); 5178 5176 #endif … … 5200 5198 { 5201 5199 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX)); 5202 return iemRaiseGeneralProtectionFault0(p IemCpu);5200 return iemRaiseGeneralProtectionFault0(pVCpu); 5203 5201 } 5204 5202 5205 if (!IEM_FULL_VERIFICATION_ENABLED(p IemCpu))5206 PDMApicSetTPR( IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);5203 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5204 PDMApicSetTPR(pVCpu, (uint8_t)uNewCrX << 4); 5207 5205 rcStrict = VINF_SUCCESS; 5208 5206 break; … … 5217 5215 { 5218 5216 if (rcStrict != VINF_SUCCESS) 5219 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);5220 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5217 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 5218 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5221 5219 } 5222 5220 … … 5233 5231 IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg) 5234 5232 { 5235 if (p IemCpu->uCpl != 0)5236 return iemRaiseGeneralProtectionFault0(p IemCpu);5237 Assert(!p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);5233 if (pVCpu->iem.s.uCpl != 0) 5234 return iemRaiseGeneralProtectionFault0(pVCpu); 5235 Assert(!pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1VM); 5238 5236 5239 5237 /* … … 5241 5239 */ 5242 5240 uint64_t uNewCrX; 5243 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5244 uNewCrX = iemGRegFetchU64(p IemCpu, iGReg);5241 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5242 uNewCrX = iemGRegFetchU64(pVCpu, iGReg); 5245 5243 else 5246 uNewCrX = iemGRegFetchU32(p IemCpu, iGReg);5244 uNewCrX = iemGRegFetchU32(pVCpu, iGReg); 5247 5245 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX); 5248 5246 } … … 5256 5254 IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw) 5257 5255 { 5258 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5259 5260 if (p IemCpu->uCpl != 0)5261 return iemRaiseGeneralProtectionFault0(p IemCpu);5256 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5257 5258 if (pVCpu->iem.s.uCpl != 0) 5259 return iemRaiseGeneralProtectionFault0(pVCpu); 5262 5260 Assert(!pCtx->eflags.Bits.u1VM); 5263 5261 … … 5276 5274 IEM_CIMPL_DEF_0(iemCImpl_clts) 5277 5275 { 5278 if (p IemCpu->uCpl != 0)5279 return iemRaiseGeneralProtectionFault0(p IemCpu);5280 5281 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5276 if (pVCpu->iem.s.uCpl != 0) 5277 return iemRaiseGeneralProtectionFault0(pVCpu); 5278 5279 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5282 5280 uint64_t uNewCr0 = pCtx->cr0; 5283 5281 uNewCr0 &= ~X86_CR0_TS; … … 5294 5292 IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg) 5295 5293 { 5296 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5294 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5297 5295 5298 5296 /* … … 5301 5299 5302 5300 /* Raise GPs. 
*/ 5303 if (p IemCpu->uCpl != 0)5304 return iemRaiseGeneralProtectionFault0(p IemCpu);5301 if (pVCpu->iem.s.uCpl != 0) 5302 return iemRaiseGeneralProtectionFault0(pVCpu); 5305 5303 Assert(!pCtx->eflags.Bits.u1VM); 5306 5304 … … 5309 5307 { 5310 5308 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg)); 5311 return iemRaiseGeneralProtectionFault0(p IemCpu);5309 return iemRaiseGeneralProtectionFault0(pVCpu); 5312 5310 } 5313 5311 … … 5316 5314 { 5317 5315 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg)); 5318 return iemRaiseDebugException(p IemCpu);5316 return iemRaiseDebugException(pVCpu); 5319 5317 } 5320 5318 … … 5344 5342 } 5345 5343 5346 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5347 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = drX;5344 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5345 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = drX; 5348 5346 else 5349 *(uint64_t *)iemGRegRef(p IemCpu, iGReg) = (uint32_t)drX;5350 5351 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5347 *(uint64_t *)iemGRegRef(pVCpu, iGReg) = (uint32_t)drX; 5348 5349 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5352 5350 return VINF_SUCCESS; 5353 5351 } … … 5362 5360 IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg) 5363 5361 { 5364 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5362 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5365 5363 5366 5364 /* 5367 5365 * Check preconditions. 5368 5366 */ 5369 if (p IemCpu->uCpl != 0)5370 return iemRaiseGeneralProtectionFault0(p IemCpu);5367 if (pVCpu->iem.s.uCpl != 0) 5368 return iemRaiseGeneralProtectionFault0(pVCpu); 5371 5369 Assert(!pCtx->eflags.Bits.u1VM); 5372 5370 … … 5376 5374 { 5377 5375 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg)); 5378 return iemRaiseGeneralProtectionFault0(p IemCpu);5376 return iemRaiseGeneralProtectionFault0(pVCpu); 5379 5377 } 5380 5378 iDrReg += 2; … … 5387 5385 { 5388 5386 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg)); 5389 return iemRaiseDebugException(p IemCpu);5387 return iemRaiseDebugException(pVCpu); 5390 5388 } 5391 5389 … … 5394 5392 */ 5395 5393 uint64_t uNewDrX; 5396 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)5397 uNewDrX = iemGRegFetchU64(p IemCpu, iGReg);5394 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 5395 uNewDrX = iemGRegFetchU64(pVCpu, iGReg); 5398 5396 else 5399 uNewDrX = iemGRegFetchU32(p IemCpu, iGReg);5397 uNewDrX = iemGRegFetchU32(pVCpu, iGReg); 5400 5398 5401 5399 /* … … 5415 5413 { 5416 5414 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX)); 5417 return iemRaiseGeneralProtectionFault0(p IemCpu);5415 return iemRaiseGeneralProtectionFault0(pVCpu); 5418 5416 } 5419 5417 uNewDrX |= X86_DR6_RA1_MASK; … … 5425 5423 { 5426 5424 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX)); 5427 return iemRaiseGeneralProtectionFault0(p IemCpu);5425 return iemRaiseGeneralProtectionFault0(pVCpu); 5428 5426 } 5429 5427 uNewDrX |= X86_DR7_RA1_MASK; … … 5437 5435 * Do the actual setting. 5438 5436 */ 5439 if (!IEM_VERIFICATION_ENABLED(p IemCpu))5440 { 5441 int rc = CPUMSetGuestDRx( IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);5437 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5438 { 5439 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX); 5442 5440 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc); 5443 5441 } … … 5445 5443 pCtx->dr[iDrReg] = uNewDrX; 5446 5444 5447 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5445 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5448 5446 return VINF_SUCCESS; 5449 5447 } … … 5459 5457 { 5460 5458 /* ring-0 only. 
*/ 5461 if (p IemCpu->uCpl != 0)5462 return iemRaiseGeneralProtectionFault0(p IemCpu);5463 Assert(!p IemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);5464 5465 int rc = PGMInvalidatePage( IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);5466 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5459 if (pVCpu->iem.s.uCpl != 0) 5460 return iemRaiseGeneralProtectionFault0(pVCpu); 5461 Assert(!pVCpu->iem.s.CTX_SUFF(pCtx)->eflags.Bits.u1VM); 5462 5463 int rc = PGMInvalidatePage(pVCpu, GCPtrPage); 5464 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5467 5465 5468 5466 if (rc == VINF_SUCCESS) 5469 5467 return VINF_SUCCESS; 5470 5468 if (rc == VINF_PGM_SYNC_CR3) 5471 return iemSetPassUpStatus(p IemCpu, rc);5469 return iemSetPassUpStatus(pVCpu, rc); 5472 5470 5473 5471 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc)); … … 5482 5480 IEM_CIMPL_DEF_0(iemCImpl_rdtsc) 5483 5481 { 5484 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5482 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5485 5483 5486 5484 /* 5487 5485 * Check preconditions. 5488 5486 */ 5489 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fTsc)5490 return iemRaiseUndefinedOpcode(p IemCpu);5487 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc) 5488 return iemRaiseUndefinedOpcode(pVCpu); 5491 5489 5492 5490 if ( (pCtx->cr4 & X86_CR4_TSD) 5493 && p IemCpu->uCpl != 0)5494 { 5495 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", p IemCpu->uCpl));5496 return iemRaiseGeneralProtectionFault0(p IemCpu);5491 && pVCpu->iem.s.uCpl != 0) 5492 { 5493 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 5494 return iemRaiseGeneralProtectionFault0(pVCpu); 5497 5495 } 5498 5496 … … 5500 5498 * Do the job. 5501 5499 */ 5502 uint64_t uTicks = TMCpuTickGet( IEMCPU_TO_VMCPU(pIemCpu));5500 uint64_t uTicks = TMCpuTickGet(pVCpu); 5503 5501 pCtx->rax = (uint32_t)uTicks; 5504 5502 pCtx->rdx = uTicks >> 32; 5505 5503 #ifdef IEM_VERIFICATION_MODE_FULL 5506 p IemCpu->fIgnoreRaxRdx = true;5504 pVCpu->iem.s.fIgnoreRaxRdx = true; 5507 5505 #endif 5508 5506 5509 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5507 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5510 5508 return VINF_SUCCESS; 5511 5509 } … … 5517 5515 IEM_CIMPL_DEF_0(iemCImpl_rdmsr) 5518 5516 { 5519 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5517 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5520 5518 5521 5519 /* 5522 5520 * Check preconditions. 
5523 5521 */ 5524 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMsr)5525 return iemRaiseUndefinedOpcode(p IemCpu);5526 if (p IemCpu->uCpl != 0)5527 return iemRaiseGeneralProtectionFault0(p IemCpu);5522 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr) 5523 return iemRaiseUndefinedOpcode(pVCpu); 5524 if (pVCpu->iem.s.uCpl != 0) 5525 return iemRaiseGeneralProtectionFault0(pVCpu); 5528 5526 5529 5527 /* … … 5531 5529 */ 5532 5530 RTUINT64U uValue; 5533 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr( IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);5531 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pCtx->ecx, &uValue.u); 5534 5532 if (rcStrict == VINF_SUCCESS) 5535 5533 { … … 5537 5535 pCtx->rdx = uValue.s.Hi; 5538 5536 5539 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5537 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5540 5538 return VINF_SUCCESS; 5541 5539 } … … 5557 5555 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pCtx->ecx)); 5558 5556 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS); 5559 return iemRaiseGeneralProtectionFault0(p IemCpu);5557 return iemRaiseGeneralProtectionFault0(pVCpu); 5560 5558 } 5561 5559 … … 5566 5564 IEM_CIMPL_DEF_0(iemCImpl_wrmsr) 5567 5565 { 5568 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5566 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5569 5567 5570 5568 /* 5571 5569 * Check preconditions. 5572 5570 */ 5573 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMsr)5574 return iemRaiseUndefinedOpcode(p IemCpu);5575 if (p IemCpu->uCpl != 0)5576 return iemRaiseGeneralProtectionFault0(p IemCpu);5571 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr) 5572 return iemRaiseUndefinedOpcode(pVCpu); 5573 if (pVCpu->iem.s.uCpl != 0) 5574 return iemRaiseGeneralProtectionFault0(pVCpu); 5577 5575 5578 5576 /* … … 5584 5582 5585 5583 VBOXSTRICTRC rcStrict; 5586 if (!IEM_VERIFICATION_ENABLED(p IemCpu))5587 rcStrict = CPUMSetGuestMsr( IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);5584 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5585 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); 5588 5586 else 5589 5587 { 5590 5588 #ifdef IN_RING3 5591 5589 CPUMCTX CtxTmp = *pCtx; 5592 rcStrict = CPUMSetGuestMsr( IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);5593 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr( IEMCPU_TO_VMCPU(pIemCpu));5590 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); 5591 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu); 5594 5592 *pCtx = *pCtx2; 5595 5593 *pCtx2 = CtxTmp; … … 5600 5598 if (rcStrict == VINF_SUCCESS) 5601 5599 { 5602 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5600 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5603 5601 return VINF_SUCCESS; 5604 5602 } … … 5620 5618 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo)); 5621 5619 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS); 5622 return iemRaiseGeneralProtectionFault0(p IemCpu);5620 return iemRaiseGeneralProtectionFault0(pVCpu); 5623 5621 } 5624 5622 … … 5632 5630 IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg) 5633 5631 { 5634 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5632 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5635 5633 5636 5634 /* 5637 5635 * CPL check 5638 5636 */ 5639 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, u16Port, cbReg);5637 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg); 5640 5638 if (rcStrict != VINF_SUCCESS) 5641 5639 return rcStrict; … … 5645 5643 */ 5646 5644 uint32_t u32Value; 5647 if 
(!IEM_VERIFICATION_ENABLED(p IemCpu))5648 rcStrict = IOMIOPortRead( IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);5645 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5646 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg); 5649 5647 else 5650 rcStrict = iemVerifyFakeIOPortRead(p IemCpu, u16Port, &u32Value, cbReg);5648 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg); 5651 5649 if (IOM_SUCCESS(rcStrict)) 5652 5650 { … … 5658 5656 default: AssertFailedReturn(VERR_IEM_IPE_3); 5659 5657 } 5660 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5661 p IemCpu->cPotentialExits++;5658 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5659 pVCpu->iem.s.cPotentialExits++; 5662 5660 if (rcStrict != VINF_SUCCESS) 5663 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);5661 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 5664 5662 Assert(rcStrict == VINF_SUCCESS); /* assumed below */ 5665 5663 … … 5671 5669 && X86_DR7_ANY_RW_IO(uDr7) 5672 5670 && (pCtx->cr4 & X86_CR4_DE)) 5673 || DBGFBpIsHwIoArmed( IEMCPU_TO_VM(pIemCpu))))5674 { 5675 rcStrict = DBGFBpCheckIo( IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);5671 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM)))) 5672 { 5673 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg); 5676 5674 if (rcStrict == VINF_EM_RAW_GUEST_TRAP) 5677 rcStrict = iemRaiseDebugException(p IemCpu);5675 rcStrict = iemRaiseDebugException(pVCpu); 5678 5676 } 5679 5677 } … … 5690 5688 IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg) 5691 5689 { 5692 return IEM_CIMPL_CALL_2(iemCImpl_in, p IemCpu->CTX_SUFF(pCtx)->dx, cbReg);5690 return IEM_CIMPL_CALL_2(iemCImpl_in, pVCpu->iem.s.CTX_SUFF(pCtx)->dx, cbReg); 5693 5691 } 5694 5692 … … 5702 5700 IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg) 5703 5701 { 5704 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5702 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5705 5703 5706 5704 /* 5707 5705 * CPL check 5708 5706 */ 5709 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, u16Port, cbReg);5707 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, cbReg); 5710 5708 if (rcStrict != VINF_SUCCESS) 5711 5709 return rcStrict; … … 5722 5720 default: AssertFailedReturn(VERR_IEM_IPE_4); 5723 5721 } 5724 if (!IEM_VERIFICATION_ENABLED(p IemCpu))5725 rcStrict = IOMIOPortWrite( IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);5722 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5723 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg); 5726 5724 else 5727 rcStrict = iemVerifyFakeIOPortWrite(p IemCpu, u16Port, u32Value, cbReg);5725 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg); 5728 5726 if (IOM_SUCCESS(rcStrict)) 5729 5727 { 5730 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5731 p IemCpu->cPotentialExits++;5728 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5729 pVCpu->iem.s.cPotentialExits++; 5732 5730 if (rcStrict != VINF_SUCCESS) 5733 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);5731 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 5734 5732 Assert(rcStrict == VINF_SUCCESS); /* assumed below */ 5735 5733 … … 5741 5739 && X86_DR7_ANY_RW_IO(uDr7) 5742 5740 && (pCtx->cr4 & X86_CR4_DE)) 5743 || DBGFBpIsHwIoArmed( IEMCPU_TO_VM(pIemCpu))))5744 { 5745 rcStrict = DBGFBpCheckIo( IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), pCtx, u16Port, cbReg);5741 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM)))) 5742 { 5743 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), 
pVCpu, pCtx, u16Port, cbReg); 5746 5744 if (rcStrict == VINF_EM_RAW_GUEST_TRAP) 5747 rcStrict = iemRaiseDebugException(p IemCpu);5745 rcStrict = iemRaiseDebugException(pVCpu); 5748 5746 } 5749 5747 } … … 5759 5757 IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg) 5760 5758 { 5761 return IEM_CIMPL_CALL_2(iemCImpl_out, p IemCpu->CTX_SUFF(pCtx)->dx, cbReg);5759 return IEM_CIMPL_CALL_2(iemCImpl_out, pVCpu->iem.s.CTX_SUFF(pCtx)->dx, cbReg); 5762 5760 } 5763 5761 … … 5768 5766 IEM_CIMPL_DEF_0(iemCImpl_cli) 5769 5767 { 5770 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5771 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);5768 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5769 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 5772 5770 uint32_t const fEflOld = fEfl; 5773 5771 if (pCtx->cr0 & X86_CR0_PE) … … 5776 5774 if (!(fEfl & X86_EFL_VM)) 5777 5775 { 5778 if (p IemCpu->uCpl <= uIopl)5776 if (pVCpu->iem.s.uCpl <= uIopl) 5779 5777 fEfl &= ~X86_EFL_IF; 5780 else if ( p IemCpu->uCpl == 35778 else if ( pVCpu->iem.s.uCpl == 3 5781 5779 && (pCtx->cr4 & X86_CR4_PVI) ) 5782 5780 fEfl &= ~X86_EFL_VIF; 5783 5781 else 5784 return iemRaiseGeneralProtectionFault0(p IemCpu);5782 return iemRaiseGeneralProtectionFault0(pVCpu); 5785 5783 } 5786 5784 /* V8086 */ … … 5791 5789 fEfl &= ~X86_EFL_VIF; 5792 5790 else 5793 return iemRaiseGeneralProtectionFault0(p IemCpu);5791 return iemRaiseGeneralProtectionFault0(pVCpu); 5794 5792 } 5795 5793 /* real mode */ … … 5798 5796 5799 5797 /* Commit. */ 5800 IEMMISC_SET_EFL(p IemCpu, pCtx, fEfl);5801 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5798 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 5799 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5802 5800 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld); 5803 5801 return VINF_SUCCESS; … … 5810 5808 IEM_CIMPL_DEF_0(iemCImpl_sti) 5811 5809 { 5812 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5813 uint32_t fEfl = IEMMISC_GET_EFL(p IemCpu, pCtx);5810 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5811 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 5814 5812 uint32_t const fEflOld = fEfl; 5815 5813 … … 5819 5817 if (!(fEfl & X86_EFL_VM)) 5820 5818 { 5821 if (p IemCpu->uCpl <= uIopl)5819 if (pVCpu->iem.s.uCpl <= uIopl) 5822 5820 fEfl |= X86_EFL_IF; 5823 else if ( p IemCpu->uCpl == 35821 else if ( pVCpu->iem.s.uCpl == 3 5824 5822 && (pCtx->cr4 & X86_CR4_PVI) 5825 5823 && !(fEfl & X86_EFL_VIP) ) 5826 5824 fEfl |= X86_EFL_VIF; 5827 5825 else 5828 return iemRaiseGeneralProtectionFault0(p IemCpu);5826 return iemRaiseGeneralProtectionFault0(pVCpu); 5829 5827 } 5830 5828 /* V8086 */ … … 5836 5834 fEfl |= X86_EFL_VIF; 5837 5835 else 5838 return iemRaiseGeneralProtectionFault0(p IemCpu);5836 return iemRaiseGeneralProtectionFault0(pVCpu); 5839 5837 } 5840 5838 /* real mode */ … … 5843 5841 5844 5842 /* Commit. 
*/ 5845 IEMMISC_SET_EFL(p IemCpu, pCtx, fEfl);5846 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5847 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(p IemCpu))5848 EMSetInhibitInterruptsPC( IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);5843 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 5844 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5845 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 5846 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 5849 5847 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl)); 5850 5848 return VINF_SUCCESS; … … 5857 5855 IEM_CIMPL_DEF_0(iemCImpl_hlt) 5858 5856 { 5859 if (p IemCpu->uCpl != 0)5860 return iemRaiseGeneralProtectionFault0(p IemCpu);5861 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5857 if (pVCpu->iem.s.uCpl != 0) 5858 return iemRaiseGeneralProtectionFault0(pVCpu); 5859 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5862 5860 return VINF_EM_HALT; 5863 5861 } … … 5872 5870 * Permission checks. 5873 5871 */ 5874 if (p IemCpu->uCpl != 0)5872 if (pVCpu->iem.s.uCpl != 0) 5875 5873 { 5876 5874 Log2(("monitor: CPL != 0\n")); 5877 return iemRaiseUndefinedOpcode(p IemCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */5878 } 5879 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMonitorMWait)5875 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */ 5876 } 5877 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait) 5880 5878 { 5881 5879 Log2(("monitor: Not in CPUID\n")); 5882 return iemRaiseUndefinedOpcode(p IemCpu);5880 return iemRaiseUndefinedOpcode(pVCpu); 5883 5881 } 5884 5882 … … 5886 5884 * Gather the operands and validate them. 5887 5885 */ 5888 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5889 RTGCPTR GCPtrMem = p IemCpu->enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;5886 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5887 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax; 5890 5888 uint32_t uEcx = pCtx->ecx; 5891 5889 uint32_t uEdx = pCtx->edx; … … 5895 5893 { 5896 5894 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx); 5897 return iemRaiseGeneralProtectionFault0(p IemCpu);5898 } 5899 5900 VBOXSTRICTRC rcStrict = iemMemApplySegment(p IemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);5895 return iemRaiseGeneralProtectionFault0(pVCpu); 5896 } 5897 5898 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem); 5901 5899 if (rcStrict != VINF_SUCCESS) 5902 5900 return rcStrict; 5903 5901 5904 5902 RTGCPHYS GCPhysMem; 5905 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);5903 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem); 5906 5904 if (rcStrict != VINF_SUCCESS) 5907 5905 return rcStrict; … … 5910 5908 * Call EM to prepare the monitor/wait. 5911 5909 */ 5912 rcStrict = EMMonitorWaitPrepare( IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);5910 rcStrict = EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem); 5913 5911 Assert(rcStrict == VINF_SUCCESS); 5914 5912 5915 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5913 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5916 5914 return rcStrict; 5917 5915 } … … 5926 5924 * Permission checks. 
5927 5925 */ 5928 if (p IemCpu->uCpl != 0)5926 if (pVCpu->iem.s.uCpl != 0) 5929 5927 { 5930 5928 Log2(("mwait: CPL != 0\n")); 5931 5929 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check 5932 5930 * EFLAGS.VM then.) */ 5933 return iemRaiseUndefinedOpcode(p IemCpu);5934 } 5935 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMonitorMWait)5931 return iemRaiseUndefinedOpcode(pVCpu); 5932 } 5933 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait) 5936 5934 { 5937 5935 Log2(("mwait: Not in CPUID\n")); 5938 return iemRaiseUndefinedOpcode(p IemCpu);5936 return iemRaiseUndefinedOpcode(pVCpu); 5939 5937 } 5940 5938 … … 5942 5940 * Gather the operands and validate them. 5943 5941 */ 5944 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5942 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5945 5943 uint32_t uEax = pCtx->eax; 5946 5944 uint32_t uEcx = pCtx->ecx; … … 5951 5949 { 5952 5950 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx)); 5953 return iemRaiseGeneralProtectionFault0(p IemCpu);5951 return iemRaiseGeneralProtectionFault0(pVCpu); 5954 5952 } 5955 5953 uint32_t fMWaitFeatures = 0; 5956 5954 uint32_t uIgnore = 0; 5957 CPUMGetGuestCpuId( IEMCPU_TO_VMCPU(pIemCpu), 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);5955 CPUMGetGuestCpuId(pVCpu, 5, 0, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore); 5958 5956 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0)) 5959 5957 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0)) 5960 5958 { 5961 5959 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx)); 5962 return iemRaiseGeneralProtectionFault0(p IemCpu);5960 return iemRaiseGeneralProtectionFault0(pVCpu); 5963 5961 } 5964 5962 } … … 5967 5965 * Call EM to prepare the monitor/wait. 5968 5966 */ 5969 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform( IEMCPU_TO_VMCPU(pIemCpu), uEax, uEcx);5970 5971 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5967 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx); 5968 5969 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5972 5970 return rcStrict; 5973 5971 } … … 5979 5977 IEM_CIMPL_DEF_0(iemCImpl_swapgs) 5980 5978 { 5981 Assert(p IemCpu->enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */5979 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); /* Caller checks this. */ 5982 5980 5983 5981 /* 5984 5982 * Permission checks. 5985 5983 */ 5986 if (p IemCpu->uCpl != 0)5984 if (pVCpu->iem.s.uCpl != 0) 5987 5985 { 5988 5986 Log2(("swapgs: CPL != 0\n")); 5989 return iemRaiseUndefinedOpcode(p IemCpu);5987 return iemRaiseUndefinedOpcode(pVCpu); 5990 5988 } 5991 5989 … … 5993 5991 * Do the job. 
5994 5992 */ 5995 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);5993 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 5996 5994 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE; 5997 5995 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base; 5998 5996 pCtx->gs.u64Base = uOtherGsBase; 5999 5997 6000 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);5998 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6001 5999 return VINF_SUCCESS; 6002 6000 } … … 6008 6006 IEM_CIMPL_DEF_0(iemCImpl_cpuid) 6009 6007 { 6010 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6011 6012 CPUMGetGuestCpuId( IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);6008 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6009 6010 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx); 6013 6011 pCtx->rax &= UINT32_C(0xffffffff); 6014 6012 pCtx->rbx &= UINT32_C(0xffffffff); … … 6016 6014 pCtx->rdx &= UINT32_C(0xffffffff); 6017 6015 6018 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6016 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6019 6017 return VINF_SUCCESS; 6020 6018 } … … 6028 6026 IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm) 6029 6027 { 6030 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6028 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6031 6029 6032 6030 uint16_t const ax = pCtx->ax; 6033 6031 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm; 6034 6032 pCtx->ax = al; 6035 iemHlpUpdateArithEFlagsU8(p IemCpu, al,6033 iemHlpUpdateArithEFlagsU8(pVCpu, al, 6036 6034 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, 6037 6035 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF); 6038 6036 6039 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6037 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6040 6038 return VINF_SUCCESS; 6041 6039 } … … 6049 6047 IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm) 6050 6048 { 6051 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6049 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6052 6050 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. 
*/ 6053 6051 … … 6056 6054 uint8_t const ah = (uint8_t)ax / bImm; 6057 6055 pCtx->ax = (ah << 8) + al; 6058 iemHlpUpdateArithEFlagsU8(p IemCpu, al,6056 iemHlpUpdateArithEFlagsU8(pVCpu, al, 6059 6057 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, 6060 6058 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF); 6061 6059 6062 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6060 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6063 6061 return VINF_SUCCESS; 6064 6062 } … … 6070 6068 IEM_CIMPL_DEF_0(iemCImpl_daa) 6071 6069 { 6072 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6070 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6073 6071 6074 6072 uint8_t const al = pCtx->al; … … 6092 6090 pCtx->eflags.Bits.u1CF = 0; 6093 6091 6094 iemHlpUpdateArithEFlagsU8(p IemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6095 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6092 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6093 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6096 6094 return VINF_SUCCESS; 6097 6095 } … … 6103 6101 IEM_CIMPL_DEF_0(iemCImpl_das) 6104 6102 { 6105 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6103 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6106 6104 6107 6105 uint8_t const uInputAL = pCtx->al; … … 6128 6126 } 6129 6127 6130 iemHlpUpdateArithEFlagsU8(p IemCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6131 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6128 iemHlpUpdateArithEFlagsU8(pVCpu, pCtx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6129 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6132 6130 return VINF_SUCCESS; 6133 6131 } … … 6182 6180 IEM_CIMPL_DEF_0(iemCImpl_xgetbv) 6183 6181 { 6184 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6182 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6185 6183 if (pCtx->cr4 & X86_CR4_OSXSAVE) 6186 6184 { … … 6194 6192 default: 6195 6193 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx)); 6196 return iemRaiseGeneralProtectionFault0(p IemCpu);6194 return iemRaiseGeneralProtectionFault0(pVCpu); 6197 6195 6198 6196 } … … 6200 6198 pCtx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]); 6201 6199 6202 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6200 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6203 6201 return VINF_SUCCESS; 6204 6202 } 6205 6203 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n")); 6206 return iemRaiseUndefinedOpcode(p IemCpu);6204 return iemRaiseUndefinedOpcode(pVCpu); 6207 6205 } 6208 6206 … … 6213 6211 IEM_CIMPL_DEF_0(iemCImpl_xsetbv) 6214 6212 { 6215 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6213 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6216 6214 if (pCtx->cr4 & X86_CR4_OSXSAVE) 6217 6215 { 6218 if (p IemCpu->uCpl == 0)6216 if (pVCpu->iem.s.uCpl == 0) 6219 6217 { 6220 6218 uint32_t uEcx = pCtx->ecx; … … 6224 6222 case 0: 6225 6223 { 6226 int rc = CPUMSetGuestXcr0( IEMCPU_TO_VMCPU(pIemCpu), uNewValue);6224 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue); 6227 6225 if (rc == VINF_SUCCESS) 6228 6226 break; 6229 6227 Assert(rc == VERR_CPUM_RAISE_GP_0); 6230 6228 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue)); 6231 return iemRaiseGeneralProtectionFault0(p IemCpu);6229 return iemRaiseGeneralProtectionFault0(pVCpu); 6232 6230 } 6233 6231 … … 6235 6233 default: 6236 6234 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue)); 6237 return iemRaiseGeneralProtectionFault0(p IemCpu);6235 return iemRaiseGeneralProtectionFault0(pVCpu); 6238 6236 6239 6237 } 6240 6238 6241 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6239 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6242 6240 return VINF_SUCCESS; 6243 6241 } 6244 6242 
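The hunks in this file are almost entirely mechanical: every IEM helper that previously took a PIEMCPU now takes the PVMCPU directly and reaches the interpreter state through the iem.s sub-structure (pIemCpu->uCpl becomes pVCpu->iem.s.uCpl, IEMCPU_TO_VMCPU(pIemCpu) simply becomes pVCpu). The following is a minimal, self-contained sketch of that access pattern using simplified stand-in types; the real VMCPU/IEMCPU layouts are much larger and the struct names below are illustrative only.

    /* Hedged sketch: simplified stand-in types, not the real VBox layout. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct IEMCPUSTATE      /* stand-in for the per-VCPU IEM state */
    {
        uint8_t uCpl;               /* current privilege level, as checked by mwait/swapgs/xsetbv */
        int     enmCpuMode;         /* IEMMODE_16/32/64BIT in the real code */
    } IEMCPUSTATE;

    typedef struct VMCPUSTUB        /* stand-in for VMCPU: IEM state lives in a named sub-struct */
    {
        struct { IEMCPUSTATE s; } iem;
    } VMCPUSTUB, *PVMCPUSTUB;

    /* Old shape (before this change): helper(PIEMCPU pIemCpu) -> pIemCpu->uCpl
     * New shape (this change):        helper(PVMCPU pVCpu)    -> pVCpu->iem.s.uCpl */
    static int helperRequiresRing0(PVMCPUSTUB pVCpu)
    {
        if (pVCpu->iem.s.uCpl != 0)
            return -1;              /* the real helpers raise #UD or #GP(0) here */
        return 0;
    }

    int main(void)
    {
        VMCPUSTUB VCpu = { { { 3, 0 } } };
        printf("CPL3 -> %d\n", helperRequiresRing0(&VCpu));
        VCpu.iem.s.uCpl = 0;
        printf("CPL0 -> %d\n", helperRequiresRing0(&VCpu));
        return 0;
    }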
6245 Log(("xsetbv cpl=%u -> GP(0)\n", p IemCpu->uCpl));6246 return iemRaiseGeneralProtectionFault0(p IemCpu);6243 Log(("xsetbv cpl=%u -> GP(0)\n", pVCpu->iem.s.uCpl)); 6244 return iemRaiseGeneralProtectionFault0(pVCpu); 6247 6245 } 6248 6246 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n")); 6249 return iemRaiseUndefinedOpcode(p IemCpu);6247 return iemRaiseUndefinedOpcode(pVCpu); 6250 6248 } 6251 6249 … … 6260 6258 IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts) 6261 6259 { 6262 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6260 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6263 6261 6264 6262 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS)) 6265 return iemRaiseDeviceNotAvailable(p IemCpu);6263 return iemRaiseDeviceNotAvailable(pVCpu); 6266 6264 6267 6265 NOREF(fCheckXcpts); /** @todo trigger pending exceptions: 6268 6266 if (fCheckXcpts && TODO ) 6269 return iemRaiseMathFault(p IemCpu);6267 return iemRaiseMathFault(pVCpu); 6270 6268 */ 6271 6269 … … 6282 6280 pXState->x87.FOP = 0; 6283 6281 6284 iemHlpUsedFpu(p IemCpu);6285 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6282 iemHlpUsedFpu(pVCpu); 6283 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6286 6284 return VINF_SUCCESS; 6287 6285 } … … 6297 6295 IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 6298 6296 { 6299 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6297 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6300 6298 6301 6299 /* … … 6303 6301 */ 6304 6302 if (pCtx->cr0 & X86_CR0_EM) 6305 return iemRaiseUndefinedOpcode(p IemCpu);6303 return iemRaiseUndefinedOpcode(pVCpu); 6306 6304 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM)) 6307 return iemRaiseDeviceNotAvailable(p IemCpu);6305 return iemRaiseDeviceNotAvailable(pVCpu); 6308 6306 if (GCPtrEff & 15) 6309 6307 { … … 6312 6310 if ( (pCtx->cr0 & X86_CR0_AM) 6313 6311 && pCtx->eflags.Bits.u1AC 6314 && p IemCpu->uCpl == 3)6315 return iemRaiseAlignmentCheckException(p IemCpu);6316 return iemRaiseGeneralProtectionFault0(p IemCpu);6312 && pVCpu->iem.s.uCpl == 3) 6313 return iemRaiseAlignmentCheckException(pVCpu); 6314 return iemRaiseGeneralProtectionFault0(pVCpu); 6317 6315 } 6318 6316 … … 6321 6319 */ 6322 6320 void *pvMem512; 6323 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);6321 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6324 6322 if (rcStrict != VINF_SUCCESS) 6325 6323 return rcStrict; … … 6371 6369 /* XMM registers. */ 6372 6370 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR) 6373 || p IemCpu->enmCpuMode != IEMMODE_64BIT6374 || p IemCpu->uCpl != 0)6371 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 6372 || pVCpu->iem.s.uCpl != 0) 6375 6373 { 6376 6374 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8; … … 6384 6382 * Commit the memory. 
6385 6383 */ 6386 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);6384 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6387 6385 if (rcStrict != VINF_SUCCESS) 6388 6386 return rcStrict; 6389 6387 6390 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6388 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6391 6389 return VINF_SUCCESS; 6392 6390 } … … 6401 6399 IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 6402 6400 { 6403 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6401 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6404 6402 6405 6403 /* … … 6407 6405 */ 6408 6406 if (pCtx->cr0 & X86_CR0_EM) 6409 return iemRaiseUndefinedOpcode(p IemCpu);6407 return iemRaiseUndefinedOpcode(pVCpu); 6410 6408 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM)) 6411 return iemRaiseDeviceNotAvailable(p IemCpu);6409 return iemRaiseDeviceNotAvailable(pVCpu); 6412 6410 if (GCPtrEff & 15) 6413 6411 { … … 6416 6414 if ( (pCtx->cr0 & X86_CR0_AM) 6417 6415 && pCtx->eflags.Bits.u1AC 6418 && p IemCpu->uCpl == 3)6419 return iemRaiseAlignmentCheckException(p IemCpu);6420 return iemRaiseGeneralProtectionFault0(p IemCpu);6416 && pVCpu->iem.s.uCpl == 3) 6417 return iemRaiseAlignmentCheckException(pVCpu); 6418 return iemRaiseGeneralProtectionFault0(pVCpu); 6421 6419 } 6422 6420 … … 6425 6423 */ 6426 6424 void *pvMem512; 6427 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);6425 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R); 6428 6426 if (rcStrict != VINF_SUCCESS) 6429 6427 return rcStrict; … … 6439 6437 { 6440 6438 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK)); 6441 return iemRaiseGeneralProtectionFault0(p IemCpu);6439 return iemRaiseGeneralProtectionFault0(pVCpu); 6442 6440 } 6443 6441 … … 6464 6462 6465 6463 /* FPU IP, CS, DP and DS. */ 6466 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)6464 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6467 6465 { 6468 6466 pDst->FPUIP = pSrc->FPUIP; … … 6485 6483 /* XMM registers. */ 6486 6484 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR) 6487 || p IemCpu->enmCpuMode != IEMMODE_64BIT6488 || p IemCpu->uCpl != 0)6485 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 6486 || pVCpu->iem.s.uCpl != 0) 6489 6487 { 6490 6488 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8; … … 6496 6494 * Commit the memory. 6497 6495 */ 6498 rcStrict = iemMemCommitAndUnmap(p IemCpu, pvMem512, IEM_ACCESS_DATA_R);6496 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R); 6499 6497 if (rcStrict != VINF_SUCCESS) 6500 6498 return rcStrict; 6501 6499 6502 iemHlpUsedFpu(p IemCpu);6503 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6500 iemHlpUsedFpu(pVCpu); 6501 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6504 6502 return VINF_SUCCESS; 6505 6503 } … … 6512 6510 * @param pCtx The CPU context. 
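The fxsave/fxrstor guards above check the effective address for 16-byte alignment before touching the 512-byte save area: a misaligned pointer raises #AC only when CR0.AM, EFLAGS.AC and CPL 3 all line up, and #GP(0) otherwise. A small sketch of that decision, with illustrative constants standing in for the real exception-raising helpers:

    /* Hedged sketch of the fxsave/fxrstor misalignment check; the enum values
     * are illustrative vector numbers, not VBox status codes. */
    #include <stdint.h>
    #include <stdio.h>

    enum { XCPT_NONE = 0, XCPT_GP0 = 13, XCPT_AC = 17 };

    static int fxsaveAlignmentXcpt(uint64_t GCPtrEff, int fCr0Am, int fEflAc, uint8_t uCpl)
    {
        if (!(GCPtrEff & 15))       /* the 512-byte area must be 16-byte aligned */
            return XCPT_NONE;
        if (fCr0Am && fEflAc && uCpl == 3)
            return XCPT_AC;         /* alignment-check fault */
        return XCPT_GP0;            /* general protection, error code 0 */
    }

    int main(void)
    {
        printf("%d\n", fxsaveAlignmentXcpt(0x1000, 1, 1, 3)); /* aligned    -> 0  */
        printf("%d\n", fxsaveAlignmentXcpt(0x1008, 1, 1, 3)); /* CPL3 + AC  -> 17 */
        printf("%d\n", fxsaveAlignmentXcpt(0x1008, 0, 0, 0)); /* otherwise  -> 13 */
        return 0;
    }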
6513 6511 */ 6514 static void iemCImplCommonFpuStoreEnv(P IEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)6512 static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx) 6515 6513 { 6516 6514 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87; … … 6520 6518 uPtr.pu16[1] = pSrcX87->FSW; 6521 6519 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87); 6522 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))6520 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6523 6521 { 6524 6522 /** @todo Testcase: How does this work when the FPUIP/CS was saved in … … 6549 6547 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87); 6550 6548 uPtr.pu16[2*2+1] = 0xffff; 6551 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))6549 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6552 6550 { 6553 6551 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP; … … 6575 6573 * @param pCtx The CPU context. 6576 6574 */ 6577 static void iemCImplCommonFpuRestoreEnv(P IEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)6575 static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx) 6578 6576 { 6579 6577 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87; … … 6583 6581 pDstX87->FSW = uPtr.pu16[1]; 6584 6582 pDstX87->FTW = uPtr.pu16[2]; 6585 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))6583 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6586 6584 { 6587 6585 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4); … … 6609 6607 pDstX87->FSW = uPtr.pu16[1*2]; 6610 6608 pDstX87->FTW = uPtr.pu16[2*2]; 6611 if (IEM_IS_REAL_OR_V86_MODE(p IemCpu))6609 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 6612 6610 { 6613 6611 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4); … … 6649 6647 IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) 6650 6648 { 6651 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6649 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6652 6650 RTPTRUNION uPtr; 6653 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,6651 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 6654 6652 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6655 6653 if (rcStrict != VINF_SUCCESS) 6656 6654 return rcStrict; 6657 6655 6658 iemCImplCommonFpuStoreEnv(p IemCpu, enmEffOpSize, uPtr, pCtx);6659 6660 rcStrict = iemMemCommitAndUnmap(p IemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);6656 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx); 6657 6658 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6661 6659 if (rcStrict != VINF_SUCCESS) 6662 6660 return rcStrict; 6663 6661 6664 6662 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */ 6665 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6663 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6666 6664 return VINF_SUCCESS; 6667 6665 } … … 6676 6674 IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) 6677 6675 { 6678 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6676 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6679 6677 RTPTRUNION uPtr; 6680 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,6678 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 
94 : 108, 6681 6679 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6682 6680 if (rcStrict != VINF_SUCCESS) … … 6684 6682 6685 6683 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6686 iemCImplCommonFpuStoreEnv(p IemCpu, enmEffOpSize, uPtr, pCtx);6684 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx); 6687 6685 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28)); 6688 6686 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) … … 6693 6691 } 6694 6692 6695 rcStrict = iemMemCommitAndUnmap(p IemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);6693 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 6696 6694 if (rcStrict != VINF_SUCCESS) 6697 6695 return rcStrict; … … 6711 6709 pFpuCtx->FOP = 0; 6712 6710 6713 iemHlpUsedFpu(p IemCpu);6714 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6711 iemHlpUsedFpu(pVCpu); 6712 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6715 6713 return VINF_SUCCESS; 6716 6714 } … … 6727 6725 IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 6728 6726 { 6729 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6727 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6730 6728 RTCPTRUNION uPtr; 6731 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,6729 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 6732 6730 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R); 6733 6731 if (rcStrict != VINF_SUCCESS) 6734 6732 return rcStrict; 6735 6733 6736 iemCImplCommonFpuRestoreEnv(p IemCpu, enmEffOpSize, uPtr, pCtx);6737 6738 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);6734 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx); 6735 6736 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R); 6739 6737 if (rcStrict != VINF_SUCCESS) 6740 6738 return rcStrict; 6741 6739 6742 iemHlpUsedFpu(p IemCpu);6743 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6740 iemHlpUsedFpu(pVCpu); 6741 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6744 6742 return VINF_SUCCESS; 6745 6743 } … … 6754 6752 IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 6755 6753 { 6756 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6754 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6757 6755 RTCPTRUNION uPtr; 6758 VBOXSTRICTRC rcStrict = iemMemMap(p IemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,6756 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108, 6759 6757 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R); 6760 6758 if (rcStrict != VINF_SUCCESS) … … 6762 6760 6763 6761 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6764 iemCImplCommonFpuRestoreEnv(p IemCpu, enmEffOpSize, uPtr, pCtx);6762 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr, pCtx); 6765 6763 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 
14 : 28)); 6766 6764 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) … … 6772 6770 } 6773 6771 6774 rcStrict = iemMemCommitAndUnmap(p IemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);6772 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R); 6775 6773 if (rcStrict != VINF_SUCCESS) 6776 6774 return rcStrict; 6777 6775 6778 iemHlpUsedFpu(p IemCpu);6779 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6776 iemHlpUsedFpu(pVCpu); 6777 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6780 6778 return VINF_SUCCESS; 6781 6779 } … … 6789 6787 IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw) 6790 6788 { 6791 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6789 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6792 6790 6793 6791 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */ … … 6801 6799 6802 6800 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */ 6803 iemHlpUsedFpu(p IemCpu);6804 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6801 iemHlpUsedFpu(pVCpu); 6802 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6805 6803 return VINF_SUCCESS; 6806 6804 } … … 6815 6813 IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg) 6816 6814 { 6817 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6815 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6818 6816 6819 6817 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; … … 6850 6848 } 6851 6849 6852 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6853 iemHlpUsedFpu(p IemCpu);6854 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6850 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6851 iemHlpUsedFpu(pVCpu); 6852 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6855 6853 return VINF_SUCCESS; 6856 6854 } … … 6864 6862 IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop) 6865 6863 { 6866 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);6864 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 6867 6865 Assert(iStReg < 8); 6868 6866 … … 6871 6869 */ 6872 6870 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS)) 6873 return iemRaiseDeviceNotAvailable(p IemCpu);6871 return iemRaiseDeviceNotAvailable(pVCpu); 6874 6872 6875 6873 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 6876 6874 uint16_t u16Fsw = pFpuCtx->FSW; 6877 6875 if (u16Fsw & X86_FSW_ES) 6878 return iemRaiseMathFault(p IemCpu);6876 return iemRaiseMathFault(pVCpu); 6879 6877 6880 6878 /* … … 6923 6921 } 6924 6922 6925 iemFpuUpdateOpcodeAndIpWorker(p IemCpu, pCtx, pFpuCtx);6926 iemHlpUsedFpu(p IemCpu);6927 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);6923 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 6924 iemHlpUsedFpu(pVCpu); 6925 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6928 6926 return VINF_SUCCESS; 6929 6927 } -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
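This header appears to be included multiple times with different OP_SIZE/ADDR_SIZE settings, so the same pIemCpu-to-pVCpu rename repeats once per instantiation (the function names are built with RT_CONCAT4, e.g. iemCImpl_repe_cmps_op<OP_SIZE>_addr<ADDR_SIZE>). A toy analogue of that token-pasting scheme, using made-up names purely to show how one body stamps out per-width helpers:

    /* Toy analogue (made-up names) of a size-parameterised template stamped out
     * via token pasting, similar in spirit to the OP_SIZE/ADDR_SIZE scheme. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define MY_CONCAT_(a,b) a##b
    #define MY_CONCAT(a,b)  MY_CONCAT_(a,b)

    /* "Instantiate" a fill helper for a given element type/width. */
    #define MAKE_FILL(OP_TYPE, OP_BITS) \
        static void MY_CONCAT(fillU,OP_BITS)(OP_TYPE *pDst, OP_TYPE uValue, size_t cElems) \
        { \
            while (cElems-- > 0) \
                *pDst++ = uValue; \
        }

    MAKE_FILL(uint8_t,  8)   /* -> fillU8  */
    MAKE_FILL(uint16_t, 16)  /* -> fillU16 */
    MAKE_FILL(uint32_t, 32)  /* -> fillU32 */

    int main(void)
    {
        uint16_t au16[4];
        fillU16(au16, 0xabcd, 4);
        printf("%04x %04x\n", au16[0], au16[3]);
        return 0;
    }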
r60907 r62015 48 48 # define ADDR_rCX rcx 49 49 # define ADDR2_TYPE uint64_t 50 # define IS_64_BIT_CODE(a_p IemCpu)(true)50 # define IS_64_BIT_CODE(a_pVCpu) (true) 51 51 #else 52 52 # error "Bad ADDR_SIZE." … … 55 55 56 56 #if ADDR_SIZE == 64 || OP_SIZE == 64 57 # define IS_64_BIT_CODE(a_p IemCpu)(true)57 # define IS_64_BIT_CODE(a_pVCpu) (true) 58 58 #elif ADDR_SIZE == 32 59 # define IS_64_BIT_CODE(a_p IemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)59 # define IS_64_BIT_CODE(a_pVCpu) ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT) 60 60 #else 61 # define IS_64_BIT_CODE(a_p IemCpu)(false)61 # define IS_64_BIT_CODE(a_pVCpu) (false) 62 62 #endif 63 63 … … 69 69 */ 70 70 #ifdef IN_RC 71 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_ pIemCpu, a_fEflags) \71 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \ 72 72 do { \ 73 73 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \ 74 74 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \ 75 75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) ) \ 76 || IEM_VERIFICATION_ENABLED(a_p IemCpu) )) \76 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 77 77 { \ 78 78 RTCCUINTREG fSavedFlags = ASMGetFlags(); \ … … 92 92 } while (0) 93 93 #else 94 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_ pIemCpu, a_fEflags) \94 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \ 95 95 do { \ 96 96 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \ 97 97 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \ 98 98 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) ) \ 99 || IEM_VERIFICATION_ENABLED(a_p IemCpu) )) \99 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 100 100 { /* probable */ } \ 101 101 else \ … … 113 113 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for 114 114 * ones that are typically cheap. */ 115 #define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_ pIemCpu, a_fExitExpr) \115 #define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \ 116 116 do { \ 117 117 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \ 118 118 && !VM_FF_IS_PENDING(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \ 119 119 || (a_fExitExpr) \ 120 || IEM_VERIFICATION_ENABLED(a_p IemCpu) )) \120 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 121 121 { /* very likely */ } \ 122 122 else \ … … 135 135 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code). 
136 136 */ 137 #define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_ pIemCpu, a_fExitExpr) \137 #define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \ 138 138 do { \ 139 139 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \ 140 140 || (a_fExitExpr) \ 141 || IEM_VERIFICATION_ENABLED(a_p IemCpu) )) \141 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 142 142 { /* very likely */ } \ 143 143 else \ … … 155 155 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 156 156 { 157 PVM pVM = IEMCPU_TO_VM(pIemCpu); 158 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 159 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 157 PVM pVM = pVCpu->CTX_SUFF(pVM); 158 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 160 159 161 160 /* … … 165 164 if (uCounterReg == 0) 166 165 { 167 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);166 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 168 167 return VINF_SUCCESS; 169 168 } 170 169 171 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(p IemCpu, iEffSeg);170 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg); 172 171 uint64_t uSrc1Base; 173 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);172 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base); 174 173 if (rcStrict != VINF_SUCCESS) 175 174 return rcStrict; 176 175 177 176 uint64_t uSrc2Base; 178 rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base);177 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base); 179 178 if (rcStrict != VINF_SUCCESS) 180 179 return rcStrict; … … 203 202 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 204 203 && cbIncr > 0 /** @todo Optimize reverse direction string ops. 
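The IEM_CHECK_FF_* macros above are what let a long REP string instruction be interrupted: the implementation works one page-sized chunk at a time and, between chunks, polls the VM/VMCPU force-flag masks so the instruction can be suspended and restarted with the registers already reflecting the progress made. The following is a hedged sketch of that shape only; the pending-work predicate and return value are placeholders, not the real force-flag checks or VBox status codes.

    /* Hedged sketch of the "chunk, then poll for pending work" pattern behind
     * IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN.  All names here are placeholders. */
    #include <stdint.h>
    #include <stdio.h>

    static int g_cFakePendingAfter = 3;        /* pretend work becomes pending after 3 chunks */

    static int somethingIsPending(void)        /* placeholder for the VMCPU_FF/VM_FF checks */
    {
        return --g_cFakePendingAfter <= 0;
    }

    /* Returns 0 when all elements were done, 1 when it yielded early.
     * *pcLeft mimics rCX: it is updated so a restart resumes where we stopped. */
    static int repStoreChunked(uint8_t *pbDst, uint8_t bValue, uint64_t *pcLeft, uint64_t cPerChunk)
    {
        while (*pcLeft > 0)
        {
            uint64_t cThis = *pcLeft < cPerChunk ? *pcLeft : cPerChunk;
            for (uint64_t i = 0; i < cThis; i++)
                *pbDst++ = bValue;
            *pcLeft -= cThis;
            if (*pcLeft > 0 && somethingIsPending())
                return 1;                      /* leave; the "registers" already show progress */
        }
        return 0;
    }

    int main(void)
    {
        uint8_t  abBuf[64] = {0};
        uint64_t cLeft = 64;
        int rc = repStoreChunked(abBuf, 0xff, &cLeft, 8);
        printf("rc=%d cLeft=%llu\n", rc, (unsigned long long)cLeft);
        return 0;
    }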
*/ 205 && ( IS_64_BIT_CODE(p IemCpu)204 && ( IS_64_BIT_CODE(pVCpu) 206 205 || ( uSrc1AddrReg < pSrc1Hid->u32Limit 207 206 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit … … 212 211 { 213 212 RTGCPHYS GCPhysSrc1Mem; 214 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);213 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem); 215 214 if (rcStrict != VINF_SUCCESS) 216 215 return rcStrict; 217 216 218 217 RTGCPHYS GCPhysSrc2Mem; 219 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);218 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem); 220 219 if (rcStrict != VINF_SUCCESS) 221 220 return rcStrict; … … 227 226 PGMPAGEMAPLOCK PgLockSrc2Mem; 228 227 OP_TYPE const *puSrc2Mem; 229 rcStrict = iemMemPageMap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);228 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem); 230 229 if (rcStrict == VINF_SUCCESS) 231 230 { 232 231 PGMPAGEMAPLOCK PgLockSrc1Mem; 233 232 OP_TYPE const *puSrc1Mem; 234 rcStrict = iemMemPageMap(p IemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);233 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem); 235 234 if (rcStrict == VINF_SUCCESS) 236 235 { … … 265 264 pCtx->eflags.u = uEFlags; 266 265 267 iemMemPageUnmap(p IemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);268 iemMemPageUnmap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);266 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem); 267 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); 269 268 if ( uCounterReg == 0 270 269 || !(uEFlags & X86_EFL_ZF)) 271 270 break; 272 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);271 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 273 272 continue; 274 273 } 275 iemMemPageUnmap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);274 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); 276 275 } 277 276 } … … 285 284 { 286 285 OP_TYPE uValue1; 287 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue1, iEffSeg, uSrc1AddrReg);286 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg); 288 287 if (rcStrict != VINF_SUCCESS) 289 288 return rcStrict; 290 289 OP_TYPE uValue2; 291 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);290 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg); 292 291 if (rcStrict != VINF_SUCCESS) 293 292 return rcStrict; … … 299 298 pCtx->eflags.u = uEFlags; 300 299 cLeftPage--; 301 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, 302 uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); 300 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); 303 301 } while ( (int32_t)cLeftPage > 0 304 302 && (uEFlags & X86_EFL_ZF)); … … 310 308 || !(uEFlags & X86_EFL_ZF)) 311 309 break; 312 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);310 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 313 311 } 314 312 … … 316 314 * Done. 
317 315 */ 318 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);316 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 319 317 return VINF_SUCCESS; 320 318 } … … 326 324 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 327 325 { 328 PVM pVM = IEMCPU_TO_VM(pIemCpu); 329 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 330 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 326 PVM pVM = pVCpu->CTX_SUFF(pVM); 327 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 331 328 332 329 /* … … 336 333 if (uCounterReg == 0) 337 334 { 338 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);335 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 339 336 return VINF_SUCCESS; 340 337 } 341 338 342 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(p IemCpu, iEffSeg);339 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg); 343 340 uint64_t uSrc1Base; 344 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);341 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base); 345 342 if (rcStrict != VINF_SUCCESS) 346 343 return rcStrict; 347 344 348 345 uint64_t uSrc2Base; 349 rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base);346 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base); 350 347 if (rcStrict != VINF_SUCCESS) 351 348 return rcStrict; … … 374 371 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 375 372 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */ 376 && ( IS_64_BIT_CODE(p IemCpu)373 && ( IS_64_BIT_CODE(pVCpu) 377 374 || ( uSrc1AddrReg < pSrc1Hid->u32Limit 378 375 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit … … 383 380 { 384 381 RTGCPHYS GCPhysSrc1Mem; 385 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);382 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem); 386 383 if (rcStrict != VINF_SUCCESS) 387 384 return rcStrict; 388 385 389 386 RTGCPHYS GCPhysSrc2Mem; 390 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);387 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem); 391 388 if (rcStrict != VINF_SUCCESS) 392 389 return rcStrict; … … 398 395 OP_TYPE const *puSrc2Mem; 399 396 PGMPAGEMAPLOCK PgLockSrc2Mem; 400 rcStrict = iemMemPageMap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);397 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem); 401 398 if (rcStrict == VINF_SUCCESS) 402 399 { 403 400 OP_TYPE const *puSrc1Mem; 404 401 PGMPAGEMAPLOCK PgLockSrc1Mem; 405 rcStrict = iemMemPageMap(p IemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);402 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem); 406 403 if (rcStrict == VINF_SUCCESS) 407 404 { … … 436 433 pCtx->eflags.u = uEFlags; 437 434 438 iemMemPageUnmap(p IemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);439 iemMemPageUnmap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);435 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem); 436 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); 440 437 if ( uCounterReg == 0 441 438 || (uEFlags & X86_EFL_ZF)) 442 
439 break; 443 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);440 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 444 441 continue; 445 442 } 446 iemMemPageUnmap(p IemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);443 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem); 447 444 } 448 445 } … … 456 453 { 457 454 OP_TYPE uValue1; 458 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue1, iEffSeg, uSrc1AddrReg);455 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg); 459 456 if (rcStrict != VINF_SUCCESS) 460 457 return rcStrict; 461 458 OP_TYPE uValue2; 462 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);459 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg); 463 460 if (rcStrict != VINF_SUCCESS) 464 461 return rcStrict; … … 470 467 pCtx->eflags.u = uEFlags; 471 468 cLeftPage--; 472 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, 473 uCounterReg == 0 || (uEFlags & X86_EFL_ZF)); 469 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF)); 474 470 } while ( (int32_t)cLeftPage > 0 475 471 && !(uEFlags & X86_EFL_ZF)); … … 481 477 || (uEFlags & X86_EFL_ZF)) 482 478 break; 483 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);479 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 484 480 } 485 481 … … 487 483 * Done. 488 484 */ 489 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);485 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 490 486 return VINF_SUCCESS; 491 487 } … … 497 493 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)) 498 494 { 499 PVM pVM = IEMCPU_TO_VM(pIemCpu); 500 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 501 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 495 PVM pVM = pVCpu->CTX_SUFF(pVM); 496 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 502 497 503 498 /* … … 507 502 if (uCounterReg == 0) 508 503 { 509 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);504 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 510 505 return VINF_SUCCESS; 511 506 } 512 507 513 508 uint64_t uBaseAddr; 514 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);509 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr); 515 510 if (rcStrict != VINF_SUCCESS) 516 511 return rcStrict; … … 535 530 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 536 531 && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ 537 && ( IS_64_BIT_CODE(p IemCpu)532 && ( IS_64_BIT_CODE(pVCpu) 538 533 || ( uAddrReg < pCtx->es.u32Limit 539 534 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) … … 542 537 { 543 538 RTGCPHYS GCPhysMem; 544 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);539 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 545 540 if (rcStrict != VINF_SUCCESS) 546 541 return rcStrict; … … 552 547 PGMPAGEMAPLOCK PgLockMem; 553 548 OP_TYPE const *puMem; 554 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);549 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem); 555 550 if (rcStrict == VINF_SUCCESS) 556 551 { … … 571 566 pCtx->eflags.u = uEFlags; 572 567 Assert(!(uEFlags & X86_EFL_ZF) == fQuit); 573 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);568 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); 574 569 if ( fQuit 575 570 || uCounterReg == 0) … … 580 575 if (!(uVirtAddr & (OP_SIZE - 1))) 581 576 { 582 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);577 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 583 578 continue; 584 579 } … … 595 590 { 596 591 OP_TYPE uTmpValue; 597 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);592 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg); 598 593 if (rcStrict != VINF_SUCCESS) 599 594 return rcStrict; … … 604 599 pCtx->eflags.u = uEFlags; 605 600 cLeftPage--; 606 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, 607 uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); 601 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF)); 608 602 } while ( (int32_t)cLeftPage > 0 609 603 && (uEFlags & X86_EFL_ZF)); … … 615 609 || !(uEFlags & X86_EFL_ZF)) 616 610 break; 617 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);611 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 618 612 } 619 613 … … 621 615 * Done. 622 616 */ 623 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);617 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 624 618 return VINF_SUCCESS; 625 619 } … … 631 625 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE)) 632 626 { 633 PVM pVM = IEMCPU_TO_VM(pIemCpu); 634 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 635 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 627 PVM pVM = pVCpu->CTX_SUFF(pVM); 628 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 636 629 637 630 /* … … 641 634 if (uCounterReg == 0) 642 635 { 643 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);636 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 644 637 return VINF_SUCCESS; 645 638 } 646 639 647 640 uint64_t uBaseAddr; 648 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr); 649 642 if (rcStrict != VINF_SUCCESS) 650 643 return rcStrict; … … 669 662 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 670 663 && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ 671 && ( IS_64_BIT_CODE(p IemCpu)664 && ( IS_64_BIT_CODE(pVCpu) 672 665 || ( uAddrReg < pCtx->es.u32Limit 673 666 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) … … 676 669 { 677 670 RTGCPHYS GCPhysMem; 678 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);671 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 679 672 if (rcStrict != VINF_SUCCESS) 680 673 return rcStrict; … … 686 679 PGMPAGEMAPLOCK PgLockMem; 687 680 OP_TYPE const *puMem; 688 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);681 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem); 689 682 if (rcStrict == VINF_SUCCESS) 690 683 { … … 705 698 pCtx->eflags.u = uEFlags; 706 699 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit); 707 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);700 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); 708 701 if ( fQuit 709 702 || uCounterReg == 0) … … 714 707 if (!(uVirtAddr & (OP_SIZE - 1))) 715 708 { 716 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);709 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 717 710 continue; 718 711 } … … 729 722 { 730 723 OP_TYPE uTmpValue; 731 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);724 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg); 732 725 if (rcStrict != VINF_SUCCESS) 733 726 return rcStrict; … … 737 730 pCtx->eflags.u = uEFlags; 738 731 cLeftPage--; 739 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, 740 uCounterReg == 0 || (uEFlags & X86_EFL_ZF)); 732 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF)); 741 733 } while ( (int32_t)cLeftPage > 0 742 734 && !(uEFlags & X86_EFL_ZF)); … … 748 740 || (uEFlags & X86_EFL_ZF)) 749 741 break; 750 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uEFlags);742 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags); 751 743 } 752 744 … … 754 746 * Done. 
755 747 */ 756 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);748 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 757 749 return VINF_SUCCESS; 758 750 } … … 766 758 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg) 767 759 { 768 PVM pVM = IEMCPU_TO_VM(pIemCpu); 769 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 770 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 760 PVM pVM = pVCpu->CTX_SUFF(pVM); 761 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 771 762 772 763 /* … … 776 767 if (uCounterReg == 0) 777 768 { 778 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);769 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 779 770 return VINF_SUCCESS; 780 771 } 781 772 782 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(p IemCpu, iEffSeg);773 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg); 783 774 uint64_t uSrcBase; 784 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, pSrcHid, iEffSeg, &uSrcBase);775 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase); 785 776 if (rcStrict != VINF_SUCCESS) 786 777 return rcStrict; 787 778 788 779 uint64_t uDstBase; 789 rcStrict = iemMemSegCheckWriteAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uDstBase);780 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uDstBase); 790 781 if (rcStrict != VINF_SUCCESS) 791 782 return rcStrict; … … 798 789 * Be careful with handle bypassing. 799 790 */ 800 if (p IemCpu->fBypassHandlers)791 if (pVCpu->iem.s.fBypassHandlers) 801 792 { 802 793 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__)); … … 810 801 */ 811 802 #ifdef IEM_VERIFICATION_MODE_FULL 812 if ( IEM_VERIFICATION_ENABLED(p IemCpu)803 if ( IEM_VERIFICATION_ENABLED(pVCpu) 813 804 && (cbIncr > 0 814 805 ? uSrcAddrReg <= uDstAddrReg … … 816 807 : uDstAddrReg <= uSrcAddrReg 817 808 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg)) 818 p IemCpu->fOverlappingMovs = true;809 pVCpu->iem.s.fOverlappingMovs = true; 819 810 #endif 820 811 … … 837 828 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 838 829 && cbIncr > 0 /** @todo Implement reverse direction string ops. 
*/ 839 && ( IS_64_BIT_CODE(p IemCpu)830 && ( IS_64_BIT_CODE(pVCpu) 840 831 || ( uSrcAddrReg < pSrcHid->u32Limit 841 832 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit … … 846 837 { 847 838 RTGCPHYS GCPhysSrcMem; 848 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);839 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem); 849 840 if (rcStrict != VINF_SUCCESS) 850 841 return rcStrict; 851 842 852 843 RTGCPHYS GCPhysDstMem; 853 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);844 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem); 854 845 if (rcStrict != VINF_SUCCESS) 855 846 return rcStrict; … … 861 852 PGMPAGEMAPLOCK PgLockDstMem; 862 853 OP_TYPE *puDstMem; 863 rcStrict = iemMemPageMap(p IemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);854 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem); 864 855 if (rcStrict == VINF_SUCCESS) 865 856 { 866 857 PGMPAGEMAPLOCK PgLockSrcMem; 867 858 OP_TYPE const *puSrcMem; 868 rcStrict = iemMemPageMap(p IemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);859 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem); 869 860 if (rcStrict == VINF_SUCCESS) 870 861 { … … 886 877 pCtx->ADDR_rCX = uCounterReg -= cLeftPage; 887 878 888 iemMemPageUnmap(p IemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);889 iemMemPageUnmap(p IemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);879 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem); 880 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem); 890 881 891 882 if (uCounterReg == 0) 892 883 break; 893 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);884 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 894 885 continue; 895 886 } 896 iemMemPageUnmap(p IemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);887 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem); 897 888 } 898 889 } … … 906 897 { 907 898 OP_TYPE uValue; 908 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue, iEffSeg, uSrcAddrReg);909 if (rcStrict != VINF_SUCCESS) 910 return rcStrict; 911 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(p IemCpu, X86_SREG_ES, uDstAddrReg, uValue);899 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg); 900 if (rcStrict != VINF_SUCCESS) 901 return rcStrict; 902 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue); 912 903 if (rcStrict != VINF_SUCCESS) 913 904 return rcStrict; … … 917 908 pCtx->ADDR_rCX = --uCounterReg; 918 909 cLeftPage--; 919 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uCounterReg == 0);910 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0); 920 911 } while ((int32_t)cLeftPage > 0); 921 912 … … 925 916 if (uCounterReg == 0) 926 917 break; 927 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);918 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 928 919 } 929 920 … … 931 922 * Done. 
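The rep movs body above shows the same two-tier structure as the other string ops: when the remaining run fits inside single source and destination pages (and handler bypassing or verification does not get in the way), both guest pages are translated and mapped, the whole run is processed in one go and rSI/rDI/rCX are updated together; otherwise it falls back to one iemMemFetch/iemMemStore per element. A simplified, self-contained analogue of the page-bounded arithmetic, with a plain memcpy standing in for the mapped-page copy and a fixed 4 KiB page size as an assumption:

    /* Hedged sketch of the page-granular fast path; types, page size and the
     * memcpy stand-in are simplifications of iemMemPageMap + per-page copy. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define GUEST_PAGE_SIZE 4096u

    /* Elements (of cbElem bytes) left before uAddr crosses a page boundary. */
    static uint32_t elemsLeftOnPage(uint64_t uAddr, uint32_t cbElem)
    {
        return (GUEST_PAGE_SIZE - (uint32_t)(uAddr & (GUEST_PAGE_SIZE - 1))) / cbElem;
    }

    int main(void)
    {
        uint8_t abSrc[GUEST_PAGE_SIZE], abDst[GUEST_PAGE_SIZE];
        memset(abSrc, 0x5a, sizeof(abSrc));
        memset(abDst, 0x00, sizeof(abDst));

        uint64_t uSrc = 0x10ff0;                 /* 16 bytes before a page end */
        uint64_t uDst = 0x23000;
        uint32_t cbElem = 4;
        uint64_t cElems = 32;

        while (cElems > 0)
        {
            uint32_t cThis = elemsLeftOnPage(uSrc, cbElem);
            uint32_t cDst  = elemsLeftOnPage(uDst, cbElem);
            if (cDst < cThis)
                cThis = cDst;
            if (cThis > cElems)
                cThis = (uint32_t)cElems;
            /* The real code maps both GCPhys pages here and copies within them. */
            memcpy(&abDst[uDst & (GUEST_PAGE_SIZE - 1)],
                   &abSrc[uSrc & (GUEST_PAGE_SIZE - 1)], (size_t)cThis * cbElem);
            uSrc   += (uint64_t)cThis * cbElem;  /* rSI += ... (forward, DF=0 case) */
            uDst   += (uint64_t)cThis * cbElem;  /* rDI += ... */
            cElems -= cThis;                     /* rCX -= ... */
        }
        printf("done, rSI=%#llx rDI=%#llx\n", (unsigned long long)uSrc, (unsigned long long)uDst);
        return 0;
    }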
932 923 */ 933 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);924 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 934 925 return VINF_SUCCESS; 935 926 } … … 941 932 IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE)) 942 933 { 943 PVM pVM = IEMCPU_TO_VM(pIemCpu); 944 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 945 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 934 PVM pVM = pVCpu->CTX_SUFF(pVM); 935 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 946 936 947 937 /* … … 951 941 if (uCounterReg == 0) 952 942 { 953 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);943 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 954 944 return VINF_SUCCESS; 955 945 } 956 946 957 947 uint64_t uBaseAddr; 958 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);948 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr); 959 949 if (rcStrict != VINF_SUCCESS) 960 950 return rcStrict; … … 968 958 */ 969 959 /** @todo Permit doing a page if correctly aligned. */ 970 if (p IemCpu->fBypassHandlers)960 if (pVCpu->iem.s.fBypassHandlers) 971 961 { 972 962 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__)); … … 988 978 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 989 979 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 990 && ( IS_64_BIT_CODE(p IemCpu)980 && ( IS_64_BIT_CODE(pVCpu) 991 981 || ( uAddrReg < pCtx->es.u32Limit 992 982 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) … … 995 985 { 996 986 RTGCPHYS GCPhysMem; 997 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);987 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem); 998 988 if (rcStrict != VINF_SUCCESS) 999 989 return rcStrict; … … 1005 995 PGMPAGEMAPLOCK PgLockMem; 1006 996 OP_TYPE *puMem; 1007 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);997 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem); 1008 998 if (rcStrict == VINF_SUCCESS) 1009 999 { … … 1022 1012 #endif 1023 1013 1024 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);1014 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem); 1025 1015 1026 1016 if (uCounterReg == 0) … … 1031 1021 if (!(uVirtAddr & (OP_SIZE - 1))) 1032 1022 { 1033 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1023 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1034 1024 continue; 1035 1025 } … … 1045 1035 do 1046 1036 { 1047 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(p IemCpu, X86_SREG_ES, uAddrReg, uValue);1037 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue); 1048 1038 if (rcStrict != VINF_SUCCESS) 1049 1039 return rcStrict; … … 1051 1041 pCtx->ADDR_rCX = --uCounterReg; 1052 1042 cLeftPage--; 1053 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uCounterReg == 0);1043 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0); 1054 1044 } while ((int32_t)cLeftPage > 0); 1055 1045 … … 1059 1049 if (uCounterReg == 0) 1060 1050 break; 1061 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1051 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1062 1052 } 1063 1053 … … 1065 1055 * Done. 
1066 1056 */ 1067 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1057 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1068 1058 return VINF_SUCCESS; 1069 1059 } … … 1075 1065 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg) 1076 1066 { 1077 PVM pVM = IEMCPU_TO_VM(pIemCpu); 1078 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 1079 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 1067 PVM pVM = pVCpu->CTX_SUFF(pVM); 1068 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1080 1069 1081 1070 /* … … 1085 1074 if (uCounterReg == 0) 1086 1075 { 1087 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1076 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1088 1077 return VINF_SUCCESS; 1089 1078 } 1090 1079 1091 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(p IemCpu, iEffSeg);1080 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg); 1092 1081 uint64_t uBaseAddr; 1093 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, pSrcHid, iEffSeg, &uBaseAddr);1082 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr); 1094 1083 if (rcStrict != VINF_SUCCESS) 1095 1084 return rcStrict; … … 1112 1101 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 1113 1102 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 1114 && ( IS_64_BIT_CODE(p IemCpu)1103 && ( IS_64_BIT_CODE(pVCpu) 1115 1104 || ( uAddrReg < pSrcHid->u32Limit 1116 1105 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit) … … 1119 1108 { 1120 1109 RTGCPHYS GCPhysMem; 1121 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);1110 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 1122 1111 if (rcStrict != VINF_SUCCESS) 1123 1112 return rcStrict; … … 1129 1118 PGMPAGEMAPLOCK PgLockMem; 1130 1119 OP_TYPE const *puMem; 1131 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);1120 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem); 1132 1121 if (rcStrict == VINF_SUCCESS) 1133 1122 { … … 1140 1129 pCtx->ADDR_rCX = uCounterReg -= cLeftPage; 1141 1130 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr; 1142 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);1131 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); 1143 1132 1144 1133 if (uCounterReg == 0) … … 1149 1138 if (!(uVirtAddr & (OP_SIZE - 1))) 1150 1139 { 1151 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1140 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1152 1141 continue; 1153 1142 } … … 1164 1153 { 1165 1154 OP_TYPE uTmpValue; 1166 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uTmpValue, iEffSeg, uAddrReg);1155 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg); 1167 1156 if (rcStrict != VINF_SUCCESS) 1168 1157 return rcStrict; … … 1175 1164 pCtx->ADDR_rCX = --uCounterReg; 1176 1165 cLeftPage--; 1177 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uCounterReg == 0);1166 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0); 1178 1167 } while ((int32_t)cLeftPage > 0); 1179 1168 … … 1186 1175 if (uCounterReg == 0) 1187 1176 break; 1188 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1177 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1189 1178 } 1190 1179 … … 1192 1181 * Done. 
1193 1182 */ 1194 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1183 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1195 1184 return VINF_SUCCESS; 1196 1185 } … … 1204 1193 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked) 1205 1194 { 1206 PVM pVM = IEMCPU_TO_VM(pIemCpu);1207 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1195 PVM pVM = pVCpu->CTX_SUFF(pVM); 1196 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1208 1197 VBOXSTRICTRC rcStrict; 1209 1198 … … 1211 1200 * Be careful with handle bypassing. 1212 1201 */ 1213 if (p IemCpu->fBypassHandlers)1202 if (pVCpu->iem.s.fBypassHandlers) 1214 1203 { 1215 1204 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__)); … … 1224 1213 if (!fIoChecked) 1225 1214 { 1226 rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, pCtx->dx, OP_SIZE / 8);1215 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, pCtx->dx, OP_SIZE / 8); 1227 1216 if (rcStrict != VINF_SUCCESS) 1228 1217 return rcStrict; … … 1230 1219 1231 1220 OP_TYPE *puMem; 1232 rcStrict = iemMemMap(p IemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);1221 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W); 1233 1222 if (rcStrict != VINF_SUCCESS) 1234 1223 return rcStrict; 1235 1224 1236 1225 uint32_t u32Value = 0; 1237 if (!IEM_VERIFICATION_ENABLED(p IemCpu))1238 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);1226 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1227 rcStrict = IOMIOPortRead(pVM, pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 1239 1228 else 1240 rcStrict = iemVerifyFakeIOPortRead(p IemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);1229 rcStrict = iemVerifyFakeIOPortRead(pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 1241 1230 if (IOM_SUCCESS(rcStrict)) 1242 1231 { 1243 1232 *puMem = (OP_TYPE)u32Value; 1244 1233 # ifdef IN_RING3 1245 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(p IemCpu, puMem, IEM_ACCESS_DATA_W);1234 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W); 1246 1235 # else 1247 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(p IemCpu, puMem, IEM_ACCESS_DATA_W);1236 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W); 1248 1237 # endif 1249 1238 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS)) … … 1253 1242 else 1254 1243 pCtx->ADDR_rDI -= OP_SIZE / 8; 1255 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1244 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1256 1245 } 1257 1246 else … … 1267 1256 IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked) 1268 1257 { 1269 PVM pVM = IEMCPU_TO_VM(pIemCpu); 1270 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 1271 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 1258 PVM pVM = pVCpu->CTX_SUFF(pVM); 1259 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1272 1260 1273 1261 /* … … 1278 1266 if (!fIoChecked) 1279 1267 { 1280 rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, u16Port, OP_SIZE / 8);1268 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8); 1281 1269 if (rcStrict != VINF_SUCCESS) 1282 1270 return rcStrict; … … 1286 1274 if (uCounterReg == 0) 1287 1275 { 1288 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1276 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1289 1277 return VINF_SUCCESS; 1290 1278 } 1291 1279 1292 1280 uint64_t uBaseAddr; 1293 rcStrict = iemMemSegCheckWriteAccessEx(p IemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);1281 rcStrict 
= iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr); 1294 1282 if (rcStrict != VINF_SUCCESS) 1295 1283 return rcStrict; … … 1301 1289 * Be careful with handle bypassing. 1302 1290 */ 1303 if (p IemCpu->fBypassHandlers)1291 if (pVCpu->iem.s.fBypassHandlers) 1304 1292 { 1305 1293 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__)); … … 1321 1309 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 1322 1310 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 1323 && ( IS_64_BIT_CODE(p IemCpu)1311 && ( IS_64_BIT_CODE(pVCpu) 1324 1312 || ( uAddrReg < pCtx->es.u32Limit 1325 1313 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) 1326 1314 ) 1327 && !IEM_VERIFICATION_ENABLED(p IemCpu)1315 && !IEM_VERIFICATION_ENABLED(pVCpu) 1328 1316 ) 1329 1317 { 1330 1318 RTGCPHYS GCPhysMem; 1331 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);1319 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem); 1332 1320 if (rcStrict != VINF_SUCCESS) 1333 1321 return rcStrict; … … 1339 1327 PGMPAGEMAPLOCK PgLockMem; 1340 1328 OP_TYPE *puMem; 1341 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);1329 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem); 1342 1330 if (rcStrict == VINF_SUCCESS) 1343 1331 { … … 1351 1339 puMem += cActualTransfers; 1352 1340 1353 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);1341 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem); 1354 1342 1355 1343 if (rcStrict != VINF_SUCCESS) … … 1357 1345 if (IOM_SUCCESS(rcStrict)) 1358 1346 { 1359 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1347 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1360 1348 if (uCounterReg == 0) 1361 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1349 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1362 1350 } 1363 1351 return rcStrict; … … 1370 1358 if (!(uVirtAddr & (OP_SIZE - 1))) 1371 1359 { 1372 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1360 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1373 1361 continue; 1374 1362 } … … 1390 1378 { 1391 1379 OP_TYPE *puMem; 1392 rcStrict = iemMemMap(p IemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);1380 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W); 1393 1381 if (rcStrict != VINF_SUCCESS) 1394 1382 return rcStrict; 1395 1383 1396 1384 uint32_t u32Value = 0; 1397 if (!IEM_VERIFICATION_ENABLED(p IemCpu))1385 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1398 1386 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8); 1399 1387 else 1400 rcStrict = iemVerifyFakeIOPortRead(p IemCpu, u16Port, &u32Value, OP_SIZE / 8);1388 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, OP_SIZE / 8); 1401 1389 if (!IOM_SUCCESS(rcStrict)) 1402 1390 return rcStrict; … … 1404 1392 *puMem = (OP_TYPE)u32Value; 1405 1393 # ifdef IN_RING3 1406 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(p IemCpu, puMem, IEM_ACCESS_DATA_W);1394 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W); 1407 1395 # else 1408 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(p IemCpu, puMem, IEM_ACCESS_DATA_W);1396 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, 
IEM_ACCESS_DATA_W); 1409 1397 # endif 1410 1398 if (rcStrict2 == VINF_SUCCESS) … … 1421 1409 { 1422 1410 if (uCounterReg == 0) 1423 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1424 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1411 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1412 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1425 1413 return rcStrict; 1426 1414 } 1427 1415 1428 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uCounterReg == 0);1416 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0); 1429 1417 } while ((int32_t)cLeftPage > 0); 1430 1418 … … 1435 1423 if (uCounterReg == 0) 1436 1424 break; 1437 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1425 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1438 1426 } 1439 1427 … … 1441 1429 * Done. 1442 1430 */ 1443 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1431 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1444 1432 return VINF_SUCCESS; 1445 1433 } … … 1451 1439 IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked) 1452 1440 { 1453 PVM pVM = IEMCPU_TO_VM(pIemCpu);1454 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);1441 PVM pVM = pVCpu->CTX_SUFF(pVM); 1442 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1455 1443 VBOXSTRICTRC rcStrict; 1456 1444 … … 1462 1450 if (!fIoChecked) 1463 1451 { 1464 rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, pCtx->dx, OP_SIZE / 8);1452 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, pCtx->dx, OP_SIZE / 8); 1465 1453 if (rcStrict != VINF_SUCCESS) 1466 1454 return rcStrict; … … 1468 1456 1469 1457 OP_TYPE uValue; 1470 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);1458 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pCtx->ADDR_rSI); 1471 1459 if (rcStrict == VINF_SUCCESS) 1472 1460 { 1473 if (!IEM_VERIFICATION_ENABLED(p IemCpu))1474 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);1461 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1462 rcStrict = IOMIOPortWrite(pVM, pVCpu, pCtx->dx, uValue, OP_SIZE / 8); 1475 1463 else 1476 rcStrict = iemVerifyFakeIOPortWrite(p IemCpu, pCtx->dx, uValue, OP_SIZE / 8);1464 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, pCtx->dx, uValue, OP_SIZE / 8); 1477 1465 if (IOM_SUCCESS(rcStrict)) 1478 1466 { … … 1481 1469 else 1482 1470 pCtx->ADDR_rSI -= OP_SIZE / 8; 1483 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1484 if (rcStrict != VINF_SUCCESS) 1485 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1471 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1472 if (rcStrict != VINF_SUCCESS) 1473 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1486 1474 } 1487 1475 } … … 1495 1483 IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked) 1496 1484 { 1497 PVM pVM = IEMCPU_TO_VM(pIemCpu); 1498 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu); 1499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx); 1485 PVM pVM = pVCpu->CTX_SUFF(pVM); 1486 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 1500 1487 1501 1488 /* … … 1506 1493 if (!fIoChecked) 1507 1494 { 1508 rcStrict = iemHlpCheckPortIOPermission(p IemCpu, pCtx, u16Port, OP_SIZE / 8);1495 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8); 1509 1496 if (rcStrict != VINF_SUCCESS) 1510 1497 return rcStrict; … … 1514 1501 if (uCounterReg == 0) 1515 1502 { 1516 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1503 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 
1517 1504 return VINF_SUCCESS; 1518 1505 } 1519 1506 1520 PCCPUMSELREGHID pHid = iemSRegGetHid(p IemCpu, iEffSeg);1507 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg); 1521 1508 uint64_t uBaseAddr; 1522 rcStrict = iemMemSegCheckReadAccessEx(p IemCpu, pHid, iEffSeg, &uBaseAddr);1509 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr); 1523 1510 if (rcStrict != VINF_SUCCESS) 1524 1511 return rcStrict; … … 1541 1528 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */ 1542 1529 && cbIncr > 0 /** @todo Implement reverse direction string ops. */ 1543 && ( IS_64_BIT_CODE(p IemCpu)1530 && ( IS_64_BIT_CODE(pVCpu) 1544 1531 || ( uAddrReg < pHid->u32Limit 1545 1532 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit) 1546 1533 ) 1547 && !IEM_VERIFICATION_ENABLED(p IemCpu)1534 && !IEM_VERIFICATION_ENABLED(pVCpu) 1548 1535 ) 1549 1536 { 1550 1537 RTGCPHYS GCPhysMem; 1551 rcStrict = iemMemPageTranslateAndCheckAccess(p IemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);1538 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem); 1552 1539 if (rcStrict != VINF_SUCCESS) 1553 1540 return rcStrict; … … 1559 1546 PGMPAGEMAPLOCK PgLockMem; 1560 1547 OP_TYPE const *puMem; 1561 rcStrict = iemMemPageMap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);1548 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem); 1562 1549 if (rcStrict == VINF_SUCCESS) 1563 1550 { … … 1571 1558 puMem += cActualTransfers; 1572 1559 1573 iemMemPageUnmap(p IemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);1560 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem); 1574 1561 1575 1562 if (rcStrict != VINF_SUCCESS) … … 1577 1564 if (IOM_SUCCESS(rcStrict)) 1578 1565 { 1579 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1566 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1580 1567 if (uCounterReg == 0) 1581 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1568 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1582 1569 } 1583 1570 return rcStrict; … … 1591 1578 if (!(uVirtAddr & (OP_SIZE - 1))) 1592 1579 { 1593 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1580 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1594 1581 continue; 1595 1582 } … … 1611 1598 { 1612 1599 OP_TYPE uValue; 1613 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(p IemCpu, &uValue, iEffSeg, uAddrReg);1614 if (rcStrict != VINF_SUCCESS) 1615 return rcStrict; 1616 1617 if (!IEM_VERIFICATION_ENABLED(p IemCpu))1600 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg); 1601 if (rcStrict != VINF_SUCCESS) 1602 return rcStrict; 1603 1604 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1618 1605 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8); 1619 1606 else 1620 rcStrict = iemVerifyFakeIOPortWrite(p IemCpu, u16Port, uValue, OP_SIZE / 8);1607 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, uValue, OP_SIZE / 8); 1621 1608 if (IOM_SUCCESS(rcStrict)) 1622 1609 { … … 1630 1617 { 1631 1618 if (uCounterReg == 0) 1632 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1633 rcStrict = iemSetPassUpStatus(p IemCpu, rcStrict);1619 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 1634 1621 } 1635 1622 return rcStrict; 1636 1623 } 1637 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu,uCounterReg == 0);1624 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, 
uCounterReg == 0); 1638 1625 } while ((int32_t)cLeftPage > 0); 1639 1626 … … 1644 1631 if (uCounterReg == 0) 1645 1632 break; 1646 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, p IemCpu, pCtx->eflags.u);1633 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u); 1647 1634 } 1648 1635 … … 1650 1637 * Done. 1651 1638 */ 1652 iemRegAddToRipAndClearRF(p IemCpu, cbInstr);1639 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 1653 1640 return VINF_SUCCESS; 1654 1641 } -
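The hunks above show the mechanical core of this changeset as applied to the string I/O helpers: each routine now receives the per-virtual-CPU structure directly, interpreter-local state that used to hang off a dedicated PIEMCPU pointer (prefix flags, effective segment, the handler-bypass flag, and so on) is reached through the embedded member pVCpu->iem.s.*, and the VM pointer comes from pVCpu->CTX_SUFF(pVM) rather than an IEMCPU_TO_VM() conversion. A minimal, self-contained sketch of that before/after access pattern, using hypothetical stand-in types rather than the real VMM headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real IEMCPU/VMCPU layouts (VMM headers not shown). */
typedef struct MYIEMCPU { uint32_t fPrefixes; uint8_t iEffSeg; } MYIEMCPU;
typedef struct MYVMCPU  { struct { MYIEMCPU s; } iem; } MYVMCPU;

/* Old call shape: helpers took the interpreter sub-state pointer. */
static void oldStyle(MYIEMCPU *pIemCpu)
{
    printf("prefixes=%#x effseg=%u\n", (unsigned)pIemCpu->fPrefixes, (unsigned)pIemCpu->iEffSeg);
}

/* New call shape (this changeset): helpers take the per-VCPU structure and
 * reach the interpreter state through the embedded iem.s member. */
static void newStyle(MYVMCPU *pVCpu)
{
    printf("prefixes=%#x effseg=%u\n", (unsigned)pVCpu->iem.s.fPrefixes, (unsigned)pVCpu->iem.s.iEffSeg);
}

int main(void)
{
    MYVMCPU VCpu = { { { 0x10, 3 } } };
    oldStyle(&VCpu.iem.s);  /* pre-r62015 shape */
    newStyle(&VCpu);        /* post-r62015 shape */
    return 0;
}

The bodies of the helpers are otherwise unchanged, which is why the hunks are almost entirely one-for-one line substitutions.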
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
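The hunks below apply the same rename inside the opcode decoder templates. Register operands are still computed from the ModRM byte combined with the cached REX extension bits, which now live in pVCpu->iem.s.uRexReg and pVCpu->iem.s.uRexB. A self-contained sketch of that index calculation, with the standard ModRM field layout spelled out as local constants (the real X86_MODRM_* macros come from the x86 headers and are assumed to match):

#include <stdint.h>
#include <stdio.h>

/* Standard x86 ModRM layout: mod[7:6] reg[5:3] r/m[2:0]; REX.R / REX.B each contribute bit 3. */
#define MODRM_REG_SHIFT 3
#define MODRM_REG_MASK  7
#define MODRM_RM_MASK   7

int main(void)
{
    uint8_t  bRm     = 0xd8; /* mod=11b (register form), reg=011b, r/m=000b */
    unsigned uRexReg = 8;    /* REX.R set: reg field selects r8..r15 */
    unsigned uRexB   = 0;    /* REX.B clear */

    unsigned iReg = ((bRm >> MODRM_REG_SHIFT) & MODRM_REG_MASK) | uRexReg;
    unsigned iRm  = (bRm & MODRM_RM_MASK) | uRexB;

    printf("reg operand index %u, r/m operand index %u\n", iReg, iRm); /* prints 11 and 0 */
    return 0;
}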
r61885 r62015 45 45 IEM_MC_ARG(uint32_t *, pEFlags, 2); 46 46 47 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);48 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);47 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 48 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 49 49 IEM_MC_REF_EFLAGS(pEFlags); 50 50 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); … … 68 68 69 69 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 70 IEM_MC_MEM_MAP(pu8Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);71 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);70 IEM_MC_MEM_MAP(pu8Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 71 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 72 72 IEM_MC_FETCH_EFLAGS(EFlags); 73 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))73 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 74 74 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); 75 75 else … … 102 102 IEMOP_HLP_NO_LOCK_PREFIX(); 103 103 104 switch (p IemCpu->enmEffOpSize)104 switch (pVCpu->iem.s.enmEffOpSize) 105 105 { 106 106 case IEMMODE_16BIT: … … 110 110 IEM_MC_ARG(uint32_t *, pEFlags, 2); 111 111 112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);112 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 113 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 114 114 IEM_MC_REF_EFLAGS(pEFlags); 115 115 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 125 125 IEM_MC_ARG(uint32_t *, pEFlags, 2); 126 126 127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);127 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 128 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 129 129 IEM_MC_REF_EFLAGS(pEFlags); 130 130 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 142 142 IEM_MC_ARG(uint32_t *, pEFlags, 2); 143 143 144 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);145 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);144 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 145 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 146 146 IEM_MC_REF_EFLAGS(pEFlags); 147 147 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 160 160 */ 161 161 uint32_t const fAccess = pImpl->pfnLockedU8 ? 
IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */; 162 switch (p IemCpu->enmEffOpSize)162 switch (pVCpu->iem.s.enmEffOpSize) 163 163 { 164 164 case IEMMODE_16BIT: … … 170 170 171 171 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 172 IEM_MC_MEM_MAP(pu16Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);173 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);172 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 173 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 174 174 IEM_MC_FETCH_EFLAGS(EFlags); 175 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))175 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 176 176 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 177 177 else … … 192 192 193 193 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 194 IEM_MC_MEM_MAP(pu32Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);195 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);194 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 195 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 196 196 IEM_MC_FETCH_EFLAGS(EFlags); 197 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))197 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 198 198 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 199 199 else … … 214 214 215 215 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 216 IEM_MC_MEM_MAP(pu64Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);217 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);216 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 217 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 218 218 IEM_MC_FETCH_EFLAGS(EFlags); 219 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))219 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 220 220 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 221 221 else … … 254 254 IEM_MC_ARG(uint32_t *, pEFlags, 2); 255 255 256 IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);257 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);256 IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 257 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 258 258 IEM_MC_REF_EFLAGS(pEFlags); 259 259 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); … … 274 274 275 275 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 276 IEM_MC_FETCH_MEM_U8(u8Src, p IemCpu->iEffSeg, GCPtrEffDst);277 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);276 IEM_MC_FETCH_MEM_U8(u8Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 277 IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 278 278 IEM_MC_REF_EFLAGS(pEFlags); 279 279 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); … … 302 302 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 303 303 { 304 switch (p IemCpu->enmEffOpSize)304 switch (pVCpu->iem.s.enmEffOpSize) 305 305 { 306 306 case IEMMODE_16BIT: … … 310 310 IEM_MC_ARG(uint32_t *, pEFlags, 2); 311 311 312 
IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);313 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);312 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 313 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 314 314 IEM_MC_REF_EFLAGS(pEFlags); 315 315 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 325 325 IEM_MC_ARG(uint32_t *, pEFlags, 2); 326 326 327 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);328 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);327 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 328 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 329 329 IEM_MC_REF_EFLAGS(pEFlags); 330 330 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 341 341 IEM_MC_ARG(uint32_t *, pEFlags, 2); 342 342 343 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);344 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);343 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 344 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 345 345 IEM_MC_REF_EFLAGS(pEFlags); 346 346 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 356 356 * We're accessing memory. 357 357 */ 358 switch (p IemCpu->enmEffOpSize)358 switch (pVCpu->iem.s.enmEffOpSize) 359 359 { 360 360 case IEMMODE_16BIT: … … 366 366 367 367 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 368 IEM_MC_FETCH_MEM_U16(u16Src, p IemCpu->iEffSeg, GCPtrEffDst);369 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);368 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 369 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 370 370 IEM_MC_REF_EFLAGS(pEFlags); 371 371 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 383 383 384 384 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 385 IEM_MC_FETCH_MEM_U32(u32Src, p IemCpu->iEffSeg, GCPtrEffDst);386 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);385 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 386 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 387 387 IEM_MC_REF_EFLAGS(pEFlags); 388 388 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 401 401 402 402 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 403 IEM_MC_FETCH_MEM_U64(u64Src, p IemCpu->iEffSeg, GCPtrEffDst);404 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);403 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 404 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 405 405 IEM_MC_REF_EFLAGS(pEFlags); 406 406 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 449 449 FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl) 450 450 { 451 switch (p IemCpu->enmEffOpSize)451 switch (pVCpu->iem.s.enmEffOpSize) 452 452 { 453 453 case 
IEMMODE_16BIT: … … 555 555 { 556 556 IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 557 switch (p IemCpu->enmEffOpSize)557 switch (pVCpu->iem.s.enmEffOpSize) 558 558 { 559 559 case IEMMODE_16BIT: … … 561 561 IEM_MC_LOCAL(uint16_t, u16Ldtr); 562 562 IEM_MC_FETCH_LDTR_U16(u16Ldtr); 563 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Ldtr);563 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Ldtr); 564 564 IEM_MC_ADVANCE_RIP(); 565 565 IEM_MC_END(); … … 570 570 IEM_MC_LOCAL(uint32_t, u32Ldtr); 571 571 IEM_MC_FETCH_LDTR_U32(u32Ldtr); 572 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Ldtr);572 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Ldtr); 573 573 IEM_MC_ADVANCE_RIP(); 574 574 IEM_MC_END(); … … 579 579 IEM_MC_LOCAL(uint64_t, u64Ldtr); 580 580 IEM_MC_FETCH_LDTR_U64(u64Ldtr); 581 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Ldtr);581 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Ldtr); 582 582 IEM_MC_ADVANCE_RIP(); 583 583 IEM_MC_END(); … … 595 595 IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 596 596 IEM_MC_FETCH_LDTR_U16(u16Ldtr); 597 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);597 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Ldtr); 598 598 IEM_MC_ADVANCE_RIP(); 599 599 IEM_MC_END(); … … 613 613 { 614 614 IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 615 switch (p IemCpu->enmEffOpSize)615 switch (pVCpu->iem.s.enmEffOpSize) 616 616 { 617 617 case IEMMODE_16BIT: … … 619 619 IEM_MC_LOCAL(uint16_t, u16Tr); 620 620 IEM_MC_FETCH_TR_U16(u16Tr); 621 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Tr);621 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Tr); 622 622 IEM_MC_ADVANCE_RIP(); 623 623 IEM_MC_END(); … … 628 628 IEM_MC_LOCAL(uint32_t, u32Tr); 629 629 IEM_MC_FETCH_TR_U32(u32Tr); 630 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Tr);630 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Tr); 631 631 IEM_MC_ADVANCE_RIP(); 632 632 IEM_MC_END(); … … 637 637 IEM_MC_LOCAL(uint64_t, u64Tr); 638 638 IEM_MC_FETCH_TR_U64(u64Tr); 639 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Tr);639 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Tr); 640 640 IEM_MC_ADVANCE_RIP(); 641 641 IEM_MC_END(); … … 653 653 IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 654 654 IEM_MC_FETCH_TR_U16(u16Tr); 655 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Tr);655 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tr); 656 656 IEM_MC_ADVANCE_RIP(); 657 657 IEM_MC_END(); … … 673 673 IEM_MC_BEGIN(1, 0); 674 674 IEM_MC_ARG(uint16_t, u16Sel, 0); 675 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);675 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 676 676 IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel); 677 677 IEM_MC_END(); … … 685 685 IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS); 686 686 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */ 687 IEM_MC_FETCH_MEM_U16(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc);687 IEM_MC_FETCH_MEM_U16(u16Sel, 
pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 688 688 IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel); 689 689 IEM_MC_END(); … … 705 705 IEM_MC_BEGIN(1, 0); 706 706 IEM_MC_ARG(uint16_t, u16Sel, 0); 707 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);707 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 708 708 IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel); 709 709 IEM_MC_END(); … … 717 717 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 718 718 IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test ordre */ 719 IEM_MC_FETCH_MEM_U16(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc);719 IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 720 720 IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel); 721 721 IEM_MC_END(); … … 737 737 IEM_MC_ARG(uint16_t, u16Sel, 0); 738 738 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1); 739 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);739 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 740 740 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg); 741 741 IEM_MC_END(); … … 749 749 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 750 750 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 751 IEM_MC_FETCH_MEM_U16(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc);751 IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 752 752 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg); 753 753 IEM_MC_END(); … … 809 809 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 810 810 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 811 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);811 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 812 812 IEM_MC_CALL_CIMPL_2(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc); 813 813 IEM_MC_END(); … … 859 859 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 860 860 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 861 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);861 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 862 862 IEM_MC_CALL_CIMPL_2(iemCImpl_sidt, iEffSeg, GCPtrEffSrc); 863 863 IEM_MC_END(); … … 871 871 IEMOP_MNEMONIC("monitor"); 872 872 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. 
*/ 873 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, p IemCpu->iEffSeg);873 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pVCpu->iem.s.iEffSeg); 874 874 } 875 875 … … 892 892 IEM_MC_ARG(uint8_t, iEffSeg, 0); 893 893 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1); 894 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/p IemCpu->enmEffOpSize, 2);894 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSizeArg,/*=*/pVCpu->iem.s.enmEffOpSize, 2); 895 895 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 896 896 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 897 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);897 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 898 898 IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); 899 899 IEM_MC_END(); … … 906 906 { 907 907 IEMOP_MNEMONIC("xgetbv"); 908 if (IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fXSaveRstor)908 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor) 909 909 { 910 910 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); … … 919 919 { 920 920 IEMOP_MNEMONIC("xsetbv"); 921 if (IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fXSaveRstor)921 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor) 922 922 { 923 923 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); … … 931 931 FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm) 932 932 { 933 IEMMODE enmEffOpSize = p IemCpu->enmCpuMode == IEMMODE_64BIT933 IEMMODE enmEffOpSize = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT 934 934 ? IEMMODE_64BIT 935 : p IemCpu->enmEffOpSize;935 : pVCpu->iem.s.enmEffOpSize; 936 936 IEM_MC_BEGIN(3, 1); 937 937 IEM_MC_ARG(uint8_t, iEffSeg, 0); … … 940 940 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 941 941 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 942 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);942 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 943 943 IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg); 944 944 IEM_MC_END(); … … 979 979 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 980 980 { 981 switch (p IemCpu->enmEffOpSize)981 switch (pVCpu->iem.s.enmEffOpSize) 982 982 { 983 983 case IEMMODE_16BIT: … … 985 985 IEM_MC_LOCAL(uint16_t, u16Tmp); 986 986 IEM_MC_FETCH_CR0_U16(u16Tmp); 987 if (IEM_GET_TARGET_CPU(p IemCpu) > IEMTARGETCPU_386)987 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386) 988 988 { /* likely */ } 989 else if (IEM_GET_TARGET_CPU(p IemCpu) >= IEMTARGETCPU_386)989 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386) 990 990 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xffe0); 991 991 else 992 992 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xfff0); 993 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Tmp);993 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Tmp); 994 994 IEM_MC_ADVANCE_RIP(); 995 995 IEM_MC_END(); … … 1000 1000 IEM_MC_LOCAL(uint32_t, u32Tmp); 1001 1001 IEM_MC_FETCH_CR0_U32(u32Tmp); 1002 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Tmp);1002 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Tmp); 1003 1003 IEM_MC_ADVANCE_RIP(); 1004 1004 IEM_MC_END(); … … 1009 1009 IEM_MC_LOCAL(uint64_t, u64Tmp); 1010 1010 IEM_MC_FETCH_CR0_U64(u64Tmp); 1011 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Tmp);1011 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Tmp); 1012 1012 IEM_MC_ADVANCE_RIP(); 1013 1013 IEM_MC_END(); … … 1025 1025 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 1026 1026 IEM_MC_FETCH_CR0_U16(u16Tmp); 1027 if (IEM_GET_TARGET_CPU(p IemCpu) > IEMTARGETCPU_386)1027 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386) 1028 1028 { /* 
likely */ } 1029 else if (p IemCpu->uTargetCpu >= IEMTARGETCPU_386)1029 else if (pVCpu->iem.s.uTargetCpu >= IEMTARGETCPU_386) 1030 1030 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xffe0); 1031 1031 else 1032 1032 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xfff0); 1033 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Tmp);1033 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tmp); 1034 1034 IEM_MC_ADVANCE_RIP(); 1035 1035 IEM_MC_END(); … … 1051 1051 IEM_MC_BEGIN(1, 0); 1052 1052 IEM_MC_ARG(uint16_t, u16Tmp, 0); 1053 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);1053 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 1054 1054 IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp); 1055 1055 IEM_MC_END(); … … 1061 1061 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 1062 1062 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 1063 IEM_MC_FETCH_MEM_U16(u16Tmp, p IemCpu->iEffSeg, GCPtrEffDst);1063 IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 1064 1064 IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp); 1065 1065 IEM_MC_END(); … … 1097 1097 FNIEMOP_DEF(iemOp_Grp7_rdtscp) 1098 1098 { 1099 NOREF(p IemCpu);1099 NOREF(pVCpu); 1100 1100 IEMOP_BITCH_ABOUT_STUB(); 1101 1101 return VERR_IEM_INSTR_NOT_IMPLEMENTED; … … 1189 1189 { 1190 1190 IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 1191 switch (p IemCpu->enmEffOpSize)1191 switch (pVCpu->iem.s.enmEffOpSize) 1192 1192 { 1193 1193 case IEMMODE_16BIT: … … 1199 1199 IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); 1200 1200 1201 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1202 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);1201 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1202 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 1203 1203 IEM_MC_REF_EFLAGS(pEFlags); 1204 1204 IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg); … … 1217 1217 IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3); 1218 1218 1219 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1220 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);1219 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1220 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 1221 1221 IEM_MC_REF_EFLAGS(pEFlags); 1222 1222 IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg); … … 1231 1231 else 1232 1232 { 1233 switch (p IemCpu->enmEffOpSize)1233 switch (pVCpu->iem.s.enmEffOpSize) 1234 1234 { 1235 1235 case IEMMODE_16BIT: … … 1245 1245 IEMOP_HLP_DECODED_NL_2(fIsLar ? 
OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP); 1246 1246 1247 IEM_MC_FETCH_MEM_U16(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc);1248 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1247 IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 1248 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1249 1249 IEM_MC_REF_EFLAGS(pEFlags); 1250 1250 IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg); … … 1268 1268 /** @todo testcase: make sure it's a 16-bit read. */ 1269 1269 1270 IEM_MC_FETCH_MEM_U16(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc);1271 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1270 IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 1271 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1272 1272 IEM_MC_REF_EFLAGS(pEFlags); 1273 1273 IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg); … … 1357 1357 { 1358 1358 /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */ 1359 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->f3DNowPrefetch)1359 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->f3DNowPrefetch) 1360 1360 { 1361 1361 IEMOP_MNEMONIC("GrpP"); … … 1474 1474 FNIEMOP_DEF(iemOp_3Dnow) 1475 1475 { 1476 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->f3DNow)1476 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->f3DNow) 1477 1477 { 1478 1478 IEMOP_MNEMONIC("3Dnow"); … … 1522 1522 { 1523 1523 /* Quick hack. Need to restructure all of this later some time. */ 1524 uint32_t const fRelevantPrefix = p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ);1524 uint32_t const fRelevantPrefix = pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ); 1525 1525 if (fRelevantPrefix == 0) 1526 1526 { … … 1536 1536 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1537 1537 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1538 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB,1539 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1538 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1539 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1540 1540 IEM_MC_ADVANCE_RIP(); 1541 1541 IEM_MC_END(); … … 1555 1555 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 1556 1556 1557 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1558 IEM_MC_STORE_MEM_U128(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);1557 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1558 IEM_MC_STORE_MEM_U128(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 1559 1559 1560 1560 IEM_MC_ADVANCE_RIP(); … … 1577 1577 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 1578 1578 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1579 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1580 IEM_MC_STORE_XREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uSrc);1579 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1580 IEM_MC_STORE_XREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uSrc); 1581 1581 1582 1582 IEM_MC_ADVANCE_RIP(); … … 1597 1597 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 1598 1598 1599 
IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1600 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);1599 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1600 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 1601 1601 1602 1602 IEM_MC_ADVANCE_RIP(); … … 1621 1621 { 1622 1622 /* Quick hack. Need to restructure all of this later some time. */ 1623 if (p IemCpu->fPrefixes == IEM_OP_PRF_SIZE_OP)1623 if (pVCpu->iem.s.fPrefixes == IEM_OP_PRF_SIZE_OP) 1624 1624 { 1625 1625 IEMOP_MNEMONIC("movlpd Mq,Vq"); … … 1636 1636 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 1637 1637 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1638 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1639 IEM_MC_STORE_XREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uSrc);1638 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1639 IEM_MC_STORE_XREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uSrc); 1640 1640 IEM_MC_ADVANCE_RIP(); 1641 1641 IEM_MC_END(); … … 1658 1658 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 1659 1659 1660 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1661 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);1660 IEM_MC_FETCH_XREG_U64(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1661 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 1662 1662 1663 1663 IEM_MC_ADVANCE_RIP(); … … 1745 1745 IEMOP_MNEMONIC("mov Rd,Cd"); 1746 1746 IEMOP_HLP_MIN_386(); 1747 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)1748 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;1747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1748 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 1749 1749 else 1750 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;1750 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT; 1751 1751 1752 1752 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1753 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg;1754 if (p IemCpu->fPrefixes & IEM_OP_PRF_LOCK)1753 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg; 1754 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) 1755 1755 { 1756 1756 /* The lock prefix can be used to encode CR8 accesses on some CPUs. */ 1757 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMovCr8In32Bit)1757 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMovCr8In32Bit) 1758 1758 return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. 
*/ 1759 1759 iCrReg |= 8; … … 1768 1768 IEMOP_HLP_DONE_DECODING(); 1769 1769 1770 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | p IemCpu->uRexB, iCrReg);1770 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pVCpu->iem.s.uRexB, iCrReg); 1771 1771 } 1772 1772 … … 1779 1779 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1780 1780 IEMOP_HLP_NO_LOCK_PREFIX(); 1781 if (p IemCpu->fPrefixes & IEM_OP_PRF_REX_R)1781 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX_R) 1782 1782 return IEMOP_RAISE_INVALID_OPCODE(); 1783 1783 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd, 1784 (X86_MODRM_RM_MASK & bRm) | p IemCpu->uRexB,1784 (X86_MODRM_RM_MASK & bRm) | pVCpu->iem.s.uRexB, 1785 1785 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)); 1786 1786 } … … 1793 1793 IEMOP_MNEMONIC("mov Cd,Rd"); 1794 1794 IEMOP_HLP_MIN_386(); 1795 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)1796 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;1795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 1796 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 1797 1797 else 1798 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;1798 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT; 1799 1799 1800 1800 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1801 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg;1802 if (p IemCpu->fPrefixes & IEM_OP_PRF_LOCK)1801 uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg; 1802 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) 1803 1803 { 1804 1804 /* The lock prefix can be used to encode CR8 accesses on some CPUs. */ 1805 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fMovCr8In32Bit)1805 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMovCr8In32Bit) 1806 1806 return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */ 1807 1807 iCrReg |= 8; … … 1816 1816 IEMOP_HLP_DONE_DECODING(); 1817 1817 1818 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | p IemCpu->uRexB);1818 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pVCpu->iem.s.uRexB); 1819 1819 } 1820 1820 … … 1827 1827 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1828 1828 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 1829 if (p IemCpu->fPrefixes & IEM_OP_PRF_REX_R)1829 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX_R) 1830 1830 return IEMOP_RAISE_INVALID_OPCODE(); 1831 1831 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd, 1832 1832 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK), 1833 (X86_MODRM_RM_MASK & bRm) | p IemCpu->uRexB);1833 (X86_MODRM_RM_MASK & bRm) | pVCpu->iem.s.uRexB); 1834 1834 } 1835 1835 … … 1858 1858 FNIEMOP_DEF(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd) 1859 1859 { 1860 IEMOP_MNEMONIC(!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movaps r,mr" : "movapd r,mr");1860 IEMOP_MNEMONIC(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? 
"movaps r,mr" : "movapd r,mr"); 1861 1861 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1862 1862 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 1867 1867 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); 1868 1868 IEM_MC_BEGIN(0, 0); 1869 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))1869 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)) 1870 1870 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1871 1871 else 1872 1872 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 1873 1873 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1874 IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg,1875 (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);1874 IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1875 (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 1876 1876 IEM_MC_ADVANCE_RIP(); 1877 1877 IEM_MC_END(); … … 1888 1888 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 1889 1889 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */ 1890 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))1890 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)) 1891 1891 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1892 1892 else … … 1894 1894 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1895 1895 1896 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);1897 IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, uSrc);1896 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 1897 IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uSrc); 1898 1898 1899 1899 IEM_MC_ADVANCE_RIP(); … … 1907 1907 FNIEMOP_DEF(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd) 1908 1908 { 1909 IEMOP_MNEMONIC(!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movaps mr,r" : "movapd mr,r");1909 IEMOP_MNEMONIC(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? 
"movaps mr,r" : "movapd mr,r"); 1910 1910 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1911 1911 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 1916 1916 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); 1917 1917 IEM_MC_BEGIN(0, 0); 1918 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))1918 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)) 1919 1919 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1920 1920 else 1921 1921 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 1922 1922 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1923 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB,1924 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1923 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1924 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1925 1925 IEM_MC_ADVANCE_RIP(); 1926 1926 IEM_MC_END(); … … 1937 1937 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 1938 1938 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */ 1939 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))1939 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)) 1940 1940 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1941 1941 else … … 1943 1943 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 1944 1944 1945 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1946 IEM_MC_STORE_MEM_U128_ALIGN_SSE(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);1945 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1946 IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 1947 1947 1948 1948 IEM_MC_ADVANCE_RIP(); … … 1960 1960 FNIEMOP_DEF(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd) 1961 1961 { 1962 IEMOP_MNEMONIC(!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movntps mr,r" : "movntpd mr,r");1962 IEMOP_MNEMONIC(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? 
"movntps mr,r" : "movntpd mr,r"); 1963 1963 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1964 1964 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) … … 1973 1973 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 1974 1974 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */ 1975 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))1975 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)) 1976 1976 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT(); 1977 1977 else … … 1979 1979 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 1980 1980 1981 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);1982 IEM_MC_STORE_MEM_U128_ALIGN_SSE(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);1981 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 1982 IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 1983 1983 1984 1984 IEM_MC_ADVANCE_RIP(); … … 2055 2055 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \ 2056 2056 { \ 2057 switch (p IemCpu->enmEffOpSize) \2057 switch (pVCpu->iem.s.enmEffOpSize) \ 2058 2058 { \ 2059 2059 case IEMMODE_16BIT: \ … … 2061 2061 IEM_MC_LOCAL(uint16_t, u16Tmp); \ 2062 2062 a_Cnd { \ 2063 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB); \2064 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Tmp); \2063 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); \ 2064 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); \ 2065 2065 } IEM_MC_ENDIF(); \ 2066 2066 IEM_MC_ADVANCE_RIP(); \ … … 2072 2072 IEM_MC_LOCAL(uint32_t, u32Tmp); \ 2073 2073 a_Cnd { \ 2074 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB); \2075 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp); \2074 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); \ 2075 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); \ 2076 2076 } IEM_MC_ELSE() { \ 2077 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg); \2077 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); \ 2078 2078 } IEM_MC_ENDIF(); \ 2079 2079 IEM_MC_ADVANCE_RIP(); \ … … 2085 2085 IEM_MC_LOCAL(uint64_t, u64Tmp); \ 2086 2086 a_Cnd { \ 2087 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB); \2088 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp); \2087 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); \ 2088 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); \ 2089 2089 } IEM_MC_ENDIF(); \ 2090 2090 IEM_MC_ADVANCE_RIP(); \ … … 2097 2097 else \ 2098 2098 { \ 2099 switch (p IemCpu->enmEffOpSize) \2099 switch (pVCpu->iem.s.enmEffOpSize) \ 2100 2100 { \ 2101 2101 case IEMMODE_16BIT: \ … … 2104 2104 IEM_MC_LOCAL(uint16_t, u16Tmp); \ 2105 2105 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 2106 IEM_MC_FETCH_MEM_U16(u16Tmp, p IemCpu->iEffSeg, GCPtrEffSrc); \2106 IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 2107 2107 a_Cnd { \ 2108 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) 
| p IemCpu->uRexReg, u16Tmp); \2108 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); \ 2109 2109 } IEM_MC_ENDIF(); \ 2110 2110 IEM_MC_ADVANCE_RIP(); \ … … 2117 2117 IEM_MC_LOCAL(uint32_t, u32Tmp); \ 2118 2118 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 2119 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrEffSrc); \2119 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 2120 2120 a_Cnd { \ 2121 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp); \2121 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); \ 2122 2122 } IEM_MC_ELSE() { \ 2123 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg); \2123 IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); \ 2124 2124 } IEM_MC_ENDIF(); \ 2125 2125 IEM_MC_ADVANCE_RIP(); \ … … 2132 2132 IEM_MC_LOCAL(uint64_t, u64Tmp); \ 2133 2133 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 2134 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffSrc); \2134 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 2135 2135 a_Cnd { \ 2136 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp); \2136 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); \ 2137 2137 } IEM_MC_ENDIF(); \ 2138 2138 IEM_MC_ADVANCE_RIP(); \ … … 2323 2323 { 2324 2324 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2325 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2325 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2326 2326 { 2327 2327 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 2337 2337 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2338 2338 IEM_MC_PREPARE_SSE_USAGE(); 2339 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2340 IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2339 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2340 IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2341 2341 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 2342 2342 IEM_MC_ADVANCE_RIP(); … … 2357 2357 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2358 2358 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2359 IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);2359 IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2360 2360 2361 2361 IEM_MC_PREPARE_SSE_USAGE(); 2362 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2362 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2363 2363 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 2364 2364 … … 2404 2404 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2405 2405 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 2406 IEM_MC_FETCH_MEM_U32(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);2406 IEM_MC_FETCH_MEM_U32(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2407 2407 2408 2408 IEM_MC_PREPARE_FPU_USAGE(); … … 2471 2471 { 2472 2472 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2473 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2473 switch (pVCpu->iem.s.fPrefixes & 
(IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2474 2474 { 2475 2475 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 2485 2485 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2486 2486 IEM_MC_PREPARE_SSE_USAGE(); 2487 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2488 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2487 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2488 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2489 2489 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 2490 2490 IEM_MC_ADVANCE_RIP(); … … 2505 2505 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2506 2506 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2507 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */2507 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */ 2508 2508 2509 2509 IEM_MC_PREPARE_SSE_USAGE(); 2510 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2510 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2511 2511 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 2512 2512 … … 2552 2552 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2553 2553 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 2554 IEM_MC_FETCH_MEM_U64(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);2554 IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2555 2555 2556 2556 IEM_MC_PREPARE_FPU_USAGE(); … … 2616 2616 { 2617 2617 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2618 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2618 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2619 2619 { 2620 2620 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 2627 2627 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2628 2628 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 2629 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)2629 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 2630 2630 { 2631 2631 IEM_MC_LOCAL(uint64_t, u64Tmp); 2632 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2633 IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);2632 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2633 IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 2634 2634 } 2635 2635 else 2636 2636 { 2637 2637 IEM_MC_LOCAL(uint32_t, u32Tmp); 2638 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2639 IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);2638 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2639 IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 2640 2640 } 2641 2641 IEM_MC_ADVANCE_RIP(); … … 2651 2651 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2652 2652 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 2653 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)2653 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 2654 2654 { 2655 2655 IEM_MC_LOCAL(uint64_t, u64Tmp); 2656 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2657 
IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);2656 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2657 IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 2658 2658 } 2659 2659 else 2660 2660 { 2661 2661 IEM_MC_LOCAL(uint32_t, u32Tmp); 2662 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2663 IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);2662 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2663 IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 2664 2664 } 2665 2665 IEM_MC_ADVANCE_RIP(); … … 2678 2678 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 2679 2679 IEM_MC_LOCAL(uint64_t, u64Tmp); 2680 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)2681 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2680 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 2681 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2682 2682 else 2683 IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2683 IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2684 2684 IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); 2685 2685 IEM_MC_ADVANCE_RIP(); … … 2695 2695 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2696 2696 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 2697 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)2697 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 2698 2698 { 2699 2699 IEM_MC_LOCAL(uint64_t, u64Tmp); 2700 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2700 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2701 2701 IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); 2702 2702 } … … 2704 2704 { 2705 2705 IEM_MC_LOCAL(uint32_t, u32Tmp); 2706 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2706 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2707 2707 IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp); 2708 2708 } … … 2723 2723 bool fAligned = false; 2724 2724 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2725 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2725 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2726 2726 { 2727 2727 case IEM_OP_PRF_SIZE_OP: /* SSE aligned */ … … 2741 2741 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2742 2742 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 2743 IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg,2744 (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2743 IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 2744 (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2745 2745 IEM_MC_ADVANCE_RIP(); 2746 2746 IEM_MC_END(); … … 2760 2760 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 2761 2761 if (fAligned) 2762 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2762 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2763 2763 else 2764 IEM_MC_FETCH_MEM_U128(u128Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2765 IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, 
u128Tmp);2764 IEM_MC_FETCH_MEM_U128(u128Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2765 IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u128Tmp); 2766 2766 2767 2767 IEM_MC_ADVANCE_RIP(); … … 2802 2802 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 2803 2803 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 2804 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffSrc);2804 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2805 2805 IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp); 2806 2806 … … 2820 2820 { 2821 2821 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2822 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2822 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2823 2823 { 2824 2824 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 2827 2827 { 2828 2828 PFNIEMAIMPLMEDIAPSHUF pfnAImpl; 2829 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))2829 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2830 2830 { 2831 2831 case IEM_OP_PRF_SIZE_OP: … … 2857 2857 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2858 2858 IEM_MC_PREPARE_SSE_USAGE(); 2859 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2860 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);2859 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2860 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 2861 2861 IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); 2862 2862 IEM_MC_ADVANCE_RIP(); … … 2880 2880 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2881 2881 2882 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);2882 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2883 2883 IEM_MC_PREPARE_SSE_USAGE(); 2884 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);2884 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 2885 2885 IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); 2886 2886 … … 2930 2930 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT(); 2931 2931 2932 IEM_MC_FETCH_MEM_U64(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);2932 IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 2933 2933 IEM_MC_PREPARE_FPU_USAGE(); 2934 2934 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); … … 2976 2976 return IEMOP_RAISE_INVALID_OPCODE(); 2977 2977 case 2: 2978 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))2978 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 2979 2979 { 2980 2980 case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Nq_Ib, bRm); … … 2983 2983 } 2984 2984 case 4: 2985 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))2985 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 2986 2986 { 2987 2987 case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Nq_Ib, bRm); … … 2990 2990 } 2991 2991 case 6: 2992 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))2992 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 2993 2993 { 2994 2994 
case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Nq_Ib, bRm); … … 3031 3031 return IEMOP_RAISE_INVALID_OPCODE(); 3032 3032 case 2: 3033 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3033 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3034 3034 { 3035 3035 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Nq_Ib, bRm); … … 3038 3038 } 3039 3039 case 4: 3040 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3040 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3041 3041 { 3042 3042 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Nq_Ib, bRm); … … 3045 3045 } 3046 3046 case 6: 3047 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3047 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3048 3048 { 3049 3049 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Nq_Ib, bRm); … … 3086 3086 return IEMOP_RAISE_INVALID_OPCODE(); 3087 3087 case 2: 3088 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3088 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3089 3089 { 3090 3090 case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Nq_Ib, bRm); … … 3093 3093 } 3094 3094 case 3: 3095 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3095 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3096 3096 { 3097 3097 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrldq_Udq_Ib, bRm); … … 3099 3099 } 3100 3100 case 6: 3101 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3101 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3102 3102 { 3103 3103 case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Nq_Ib, bRm); … … 3106 3106 } 3107 3107 case 7: 3108 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))3108 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 3109 3109 { 3110 3110 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_pslldq_Udq_Ib, bRm); … … 3127 3127 { 3128 3128 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 3129 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))3129 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 3130 3130 { 3131 3131 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 3141 3141 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 3142 3142 IEM_MC_PREPARE_SSE_USAGE(); 3143 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3144 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);3143 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3144 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 3145 3145 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 3146 3146 IEM_MC_ADVANCE_RIP(); … … 3161 3161 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 3162 3162 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 3163 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);3163 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 3164 3164 3165 3165 IEM_MC_PREPARE_SSE_USAGE(); 3166 IEM_MC_REF_XREG_U128(pDst, ((bRm 
>> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3166 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3167 3167 IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc); 3168 3168 … … 3206 3206 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 3207 3207 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 3208 IEM_MC_FETCH_MEM_U64(uSrc, p IemCpu->iEffSeg, GCPtrEffSrc);3208 IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 3209 3209 3210 3210 IEM_MC_PREPARE_FPU_USAGE(); … … 3263 3263 { 3264 3264 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 3265 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))3265 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 3266 3266 { 3267 3267 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 3274 3274 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 3275 3275 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 3276 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)3276 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 3277 3277 { 3278 3278 IEM_MC_LOCAL(uint64_t, u64Tmp); 3279 IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3280 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Tmp);3279 IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3280 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Tmp); 3281 3281 } 3282 3282 else 3283 3283 { 3284 3284 IEM_MC_LOCAL(uint32_t, u32Tmp); 3285 IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3286 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Tmp);3285 IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3286 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Tmp); 3287 3287 } 3288 3288 IEM_MC_ADVANCE_RIP(); … … 3298 3298 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 3299 3299 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 3300 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)3300 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 3301 3301 { 3302 3302 IEM_MC_LOCAL(uint64_t, u64Tmp); 3303 IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3304 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);3303 IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3304 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u64Tmp); 3305 3305 } 3306 3306 else 3307 3307 { 3308 3308 IEM_MC_LOCAL(uint32_t, u32Tmp); 3309 IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3310 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);3309 IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3310 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u32Tmp); 3311 3311 } 3312 3312 IEM_MC_ADVANCE_RIP(); … … 3324 3324 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 3325 3325 IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 3326 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)3326 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 3327 3327 { 3328 3328 IEM_MC_LOCAL(uint64_t, u64Tmp); 3329 3329 IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 3330 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p 
IemCpu->uRexB, u64Tmp);3330 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Tmp); 3331 3331 } 3332 3332 else … … 3334 3334 IEM_MC_LOCAL(uint32_t, u32Tmp); 3335 3335 IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 3336 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Tmp);3336 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Tmp); 3337 3337 } 3338 3338 IEM_MC_ADVANCE_RIP(); … … 3348 3348 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 3349 3349 IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 3350 if (p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)3350 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 3351 3351 { 3352 3352 IEM_MC_LOCAL(uint64_t, u64Tmp); 3353 3353 IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 3354 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);3354 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u64Tmp); 3355 3355 } 3356 3356 else … … 3358 3358 IEM_MC_LOCAL(uint32_t, u32Tmp); 3359 3359 IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 3360 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);3360 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u32Tmp); 3361 3361 } 3362 3362 IEM_MC_ADVANCE_RIP(); … … 3376 3376 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 3377 3377 bool fAligned = false; 3378 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))3378 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 3379 3379 { 3380 3380 case IEM_OP_PRF_SIZE_OP: /* SSE aligned */ … … 3394 3394 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 3395 3395 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE(); 3396 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB,3397 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3396 IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 3397 ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3398 3398 IEM_MC_ADVANCE_RIP(); 3399 3399 IEM_MC_END(); … … 3413 3413 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 3414 3414 3415 IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);3415 IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 3416 3416 if (fAligned) 3417 IEM_MC_STORE_MEM_U128_ALIGN_SSE(p IemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);3417 IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u128Tmp); 3418 3418 else 3419 IEM_MC_STORE_MEM_U128(p IemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);3419 IEM_MC_STORE_MEM_U128(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u128Tmp); 3420 3420 3421 3421 IEM_MC_ADVANCE_RIP(); … … 3459 3459 3460 3460 IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 3461 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);3461 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, u64Tmp); 3462 3462 3463 3463 IEM_MC_ADVANCE_RIP(); … … 3479 3479 IEMOP_HLP_MIN_386(); 3480 3480 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3481 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3481 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3482 3482 { 3483 3483 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3515 3515 IEMOP_HLP_MIN_386(); 3516 3516 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3517 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3517 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3518 3518 { 3519 3519 
int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3551 3551 IEMOP_HLP_MIN_386(); 3552 3552 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3553 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3553 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3554 3554 { 3555 3555 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3587 3587 IEMOP_HLP_MIN_386(); 3588 3588 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3589 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3589 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3590 3590 { 3591 3591 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3623 3623 IEMOP_HLP_MIN_386(); 3624 3624 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3625 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3625 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3626 3626 { 3627 3627 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3659 3659 IEMOP_HLP_MIN_386(); 3660 3660 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3661 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3661 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3662 3662 { 3663 3663 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3695 3695 IEMOP_HLP_MIN_386(); 3696 3696 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3697 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3697 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3698 3698 { 3699 3699 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3731 3731 IEMOP_HLP_MIN_386(); 3732 3732 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3733 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3733 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3734 3734 { 3735 3735 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3767 3767 IEMOP_HLP_MIN_386(); 3768 3768 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3769 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3769 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3770 3770 { 3771 3771 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3803 3803 IEMOP_HLP_MIN_386(); 3804 3804 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3805 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3805 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3806 3806 { 3807 3807 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3839 3839 IEMOP_HLP_MIN_386(); 3840 3840 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3841 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3841 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3842 3842 { 3843 3843 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3875 3875 IEMOP_HLP_MIN_386(); 3876 3876 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3877 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3877 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3878 3878 { 3879 3879 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3911 3911 IEMOP_HLP_MIN_386(); 3912 3912 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3913 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3913 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3914 3914 { 3915 3915 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3947 3947 IEMOP_HLP_MIN_386(); 3948 3948 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3949 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3949 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3950 3950 { 3951 3951 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 3983 3983 IEMOP_HLP_MIN_386(); 3984 3984 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 3985 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)3985 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 3986 3986 { 3987 3987 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 4019 4019 IEMOP_HLP_MIN_386(); 4020 4020 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 4021 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)4021 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 4022 4022 { 4023 
4023 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm); … … 4065 4065 IEM_MC_BEGIN(0, 0); 4066 4066 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 4067 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4067 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4068 4068 } IEM_MC_ELSE() { 4069 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4069 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4070 4070 } IEM_MC_ENDIF(); 4071 4071 IEM_MC_ADVANCE_RIP(); … … 4079 4079 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4080 4080 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 4081 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4081 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4082 4082 } IEM_MC_ELSE() { 4083 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4083 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4084 4084 } IEM_MC_ENDIF(); 4085 4085 IEM_MC_ADVANCE_RIP(); … … 4106 4106 IEM_MC_BEGIN(0, 0); 4107 4107 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 4108 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4108 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4109 4109 } IEM_MC_ELSE() { 4110 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4110 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4111 4111 } IEM_MC_ENDIF(); 4112 4112 IEM_MC_ADVANCE_RIP(); … … 4120 4120 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4121 4121 IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) { 4122 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4122 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4123 4123 } IEM_MC_ELSE() { 4124 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4124 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4125 4125 } IEM_MC_ENDIF(); 4126 4126 IEM_MC_ADVANCE_RIP(); … … 4147 4147 IEM_MC_BEGIN(0, 0); 4148 4148 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 4149 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4149 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4150 4150 } IEM_MC_ELSE() { 4151 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4151 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4152 4152 } IEM_MC_ENDIF(); 4153 4153 IEM_MC_ADVANCE_RIP(); … … 4161 4161 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4162 4162 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 4163 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4163 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4164 4164 } IEM_MC_ELSE() { 4165 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4165 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4166 4166 } IEM_MC_ENDIF(); 4167 4167 IEM_MC_ADVANCE_RIP(); … … 4188 4188 IEM_MC_BEGIN(0, 0); 4189 4189 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 4190 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4190 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4191 4191 } IEM_MC_ELSE() { 4192 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4192 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4193 4193 } IEM_MC_ENDIF(); 4194 4194 IEM_MC_ADVANCE_RIP(); … … 4202 4202 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4203 4203 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) { 4204 
IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4204 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4205 4205 } IEM_MC_ELSE() { 4206 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4206 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4207 4207 } IEM_MC_ENDIF(); 4208 4208 IEM_MC_ADVANCE_RIP(); … … 4229 4229 IEM_MC_BEGIN(0, 0); 4230 4230 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 4231 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4231 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4232 4232 } IEM_MC_ELSE() { 4233 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4233 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4234 4234 } IEM_MC_ENDIF(); 4235 4235 IEM_MC_ADVANCE_RIP(); … … 4243 4243 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4244 4244 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 4245 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4245 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4246 4246 } IEM_MC_ELSE() { 4247 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4247 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4248 4248 } IEM_MC_ENDIF(); 4249 4249 IEM_MC_ADVANCE_RIP(); … … 4270 4270 IEM_MC_BEGIN(0, 0); 4271 4271 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 4272 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4272 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4273 4273 } IEM_MC_ELSE() { 4274 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4274 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4275 4275 } IEM_MC_ENDIF(); 4276 4276 IEM_MC_ADVANCE_RIP(); … … 4284 4284 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4285 4285 IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) { 4286 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4286 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4287 4287 } IEM_MC_ELSE() { 4288 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4288 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4289 4289 } IEM_MC_ENDIF(); 4290 4290 IEM_MC_ADVANCE_RIP(); … … 4311 4311 IEM_MC_BEGIN(0, 0); 4312 4312 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 4313 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4313 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4314 4314 } IEM_MC_ELSE() { 4315 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4315 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4316 4316 } IEM_MC_ENDIF(); 4317 4317 IEM_MC_ADVANCE_RIP(); … … 4325 4325 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4326 4326 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 4327 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4327 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4328 4328 } IEM_MC_ELSE() { 4329 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4329 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4330 4330 } IEM_MC_ENDIF(); 4331 4331 IEM_MC_ADVANCE_RIP(); … … 4352 4352 IEM_MC_BEGIN(0, 0); 4353 4353 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 4354 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4354 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4355 4355 } 
IEM_MC_ELSE() { 4356 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4356 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4357 4357 } IEM_MC_ENDIF(); 4358 4358 IEM_MC_ADVANCE_RIP(); … … 4366 4366 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4367 4367 IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) { 4368 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4368 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4369 4369 } IEM_MC_ELSE() { 4370 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4370 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4371 4371 } IEM_MC_ENDIF(); 4372 4372 IEM_MC_ADVANCE_RIP(); … … 4393 4393 IEM_MC_BEGIN(0, 0); 4394 4394 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 4395 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4395 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4396 4396 } IEM_MC_ELSE() { 4397 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4397 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4398 4398 } IEM_MC_ENDIF(); 4399 4399 IEM_MC_ADVANCE_RIP(); … … 4407 4407 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4408 4408 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 4409 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4409 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4410 4410 } IEM_MC_ELSE() { 4411 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4411 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4412 4412 } IEM_MC_ENDIF(); 4413 4413 IEM_MC_ADVANCE_RIP(); … … 4434 4434 IEM_MC_BEGIN(0, 0); 4435 4435 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 4436 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4436 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4437 4437 } IEM_MC_ELSE() { 4438 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4438 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4439 4439 } IEM_MC_ENDIF(); 4440 4440 IEM_MC_ADVANCE_RIP(); … … 4448 4448 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4449 4449 IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) { 4450 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4450 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4451 4451 } IEM_MC_ELSE() { 4452 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4452 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4453 4453 } IEM_MC_ENDIF(); 4454 4454 IEM_MC_ADVANCE_RIP(); … … 4475 4475 IEM_MC_BEGIN(0, 0); 4476 4476 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 4477 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4477 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4478 4478 } IEM_MC_ELSE() { 4479 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4479 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4480 4480 } IEM_MC_ENDIF(); 4481 4481 IEM_MC_ADVANCE_RIP(); … … 4489 4489 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4490 4490 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 4491 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4491 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4492 4492 } IEM_MC_ELSE() { 4493 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4493 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4494 
4494 } IEM_MC_ENDIF(); 4495 4495 IEM_MC_ADVANCE_RIP(); … … 4516 4516 IEM_MC_BEGIN(0, 0); 4517 4517 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 4518 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4518 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4519 4519 } IEM_MC_ELSE() { 4520 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4520 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4521 4521 } IEM_MC_ENDIF(); 4522 4522 IEM_MC_ADVANCE_RIP(); … … 4530 4530 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4531 4531 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) { 4532 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4532 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4533 4533 } IEM_MC_ELSE() { 4534 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4534 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4535 4535 } IEM_MC_ENDIF(); 4536 4536 IEM_MC_ADVANCE_RIP(); … … 4557 4557 IEM_MC_BEGIN(0, 0); 4558 4558 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 4559 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4559 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4560 4560 } IEM_MC_ELSE() { 4561 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4561 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4562 4562 } IEM_MC_ENDIF(); 4563 4563 IEM_MC_ADVANCE_RIP(); … … 4571 4571 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4572 4572 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 4573 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4573 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4574 4574 } IEM_MC_ELSE() { 4575 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4575 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4576 4576 } IEM_MC_ENDIF(); 4577 4577 IEM_MC_ADVANCE_RIP(); … … 4598 4598 IEM_MC_BEGIN(0, 0); 4599 4599 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 4600 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4600 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4601 4601 } IEM_MC_ELSE() { 4602 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4602 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4603 4603 } IEM_MC_ENDIF(); 4604 4604 IEM_MC_ADVANCE_RIP(); … … 4612 4612 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4613 4613 IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) { 4614 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4614 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4615 4615 } IEM_MC_ELSE() { 4616 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4616 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4617 4617 } IEM_MC_ENDIF(); 4618 4618 IEM_MC_ADVANCE_RIP(); … … 4639 4639 IEM_MC_BEGIN(0, 0); 4640 4640 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 4641 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4641 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4642 4642 } IEM_MC_ELSE() { 4643 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4643 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4644 4644 } IEM_MC_ENDIF(); 4645 4645 IEM_MC_ADVANCE_RIP(); … … 4653 4653 
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4654 4654 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 4655 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4655 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4656 4656 } IEM_MC_ELSE() { 4657 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4657 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4658 4658 } IEM_MC_ENDIF(); 4659 4659 IEM_MC_ADVANCE_RIP(); … … 4680 4680 IEM_MC_BEGIN(0, 0); 4681 4681 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 4682 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 0);4682 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0); 4683 4683 } IEM_MC_ELSE() { 4684 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, 1);4684 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1); 4685 4685 } IEM_MC_ENDIF(); 4686 4686 IEM_MC_ADVANCE_RIP(); … … 4694 4694 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4695 4695 IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) { 4696 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 0);4696 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4697 4697 } IEM_MC_ELSE() { 4698 IEM_MC_STORE_MEM_U8_CONST(p IemCpu->iEffSeg, GCPtrEffDst, 1);4698 IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1); 4699 4699 } IEM_MC_ENDIF(); 4700 4700 IEM_MC_ADVANCE_RIP(); … … 4715 4715 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 4716 4716 4717 switch (p IemCpu->enmEffOpSize)4717 switch (pVCpu->iem.s.enmEffOpSize) 4718 4718 { 4719 4719 case IEMMODE_16BIT: … … 4765 4765 IEMOP_HLP_MIN_386(); 4766 4766 IEMOP_HLP_NO_LOCK_PREFIX(); 4767 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, p IemCpu->enmEffOpSize);4767 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pVCpu->iem.s.enmEffOpSize); 4768 4768 } 4769 4769 … … 4792 4792 /* register destination. 
*/ 4793 4793 IEMOP_HLP_NO_LOCK_PREFIX(); 4794 switch (p IemCpu->enmEffOpSize)4794 switch (pVCpu->iem.s.enmEffOpSize) 4795 4795 { 4796 4796 case IEMMODE_16BIT: … … 4800 4800 IEM_MC_ARG(uint32_t *, pEFlags, 2); 4801 4801 4802 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4802 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4803 4803 IEM_MC_AND_LOCAL_U16(u16Src, 0xf); 4804 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);4804 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 4805 4805 IEM_MC_REF_EFLAGS(pEFlags); 4806 4806 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 4816 4816 IEM_MC_ARG(uint32_t *, pEFlags, 2); 4817 4817 4818 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4818 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4819 4819 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f); 4820 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);4820 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 4821 4821 IEM_MC_REF_EFLAGS(pEFlags); 4822 4822 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 4833 4833 IEM_MC_ARG(uint32_t *, pEFlags, 2); 4834 4834 4835 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4835 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4836 4836 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f); 4837 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);4837 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 4838 4838 IEM_MC_REF_EFLAGS(pEFlags); 4839 4839 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 4862 4862 4863 4863 /** @todo test negative bit offsets! 
*/ 4864 switch (p IemCpu->enmEffOpSize)4864 switch (pVCpu->iem.s.enmEffOpSize) 4865 4865 { 4866 4866 case IEMMODE_16BIT: … … 4873 4873 4874 4874 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4875 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4875 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4876 4876 IEM_MC_ASSIGN(i16AddrAdj, u16Src); 4877 4877 IEM_MC_AND_ARG_U16(u16Src, 0x0f); … … 4881 4881 IEM_MC_FETCH_EFLAGS(EFlags); 4882 4882 4883 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);4884 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))4883 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4884 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 4885 4885 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 4886 4886 else … … 4902 4902 4903 4903 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4904 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4904 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4905 4905 IEM_MC_ASSIGN(i32AddrAdj, u32Src); 4906 4906 IEM_MC_AND_ARG_U32(u32Src, 0x1f); … … 4910 4910 IEM_MC_FETCH_EFLAGS(EFlags); 4911 4911 4912 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);4913 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))4912 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4913 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 4914 4914 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 4915 4915 else … … 4931 4931 4932 4932 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 4933 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4933 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4934 4934 IEM_MC_ASSIGN(i64AddrAdj, u64Src); 4935 4935 IEM_MC_AND_ARG_U64(u64Src, 0x3f); … … 4939 4939 IEM_MC_FETCH_EFLAGS(EFlags); 4940 4940 4941 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);4942 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))4941 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 4942 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 4943 4943 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 4944 4944 else … … 4980 4980 IEMOP_HLP_NO_LOCK_PREFIX(); 4981 4981 4982 switch (p IemCpu->enmEffOpSize)4982 switch (pVCpu->iem.s.enmEffOpSize) 4983 4983 { 4984 4984 case IEMMODE_16BIT: … … 4989 4989 IEM_MC_ARG(uint32_t *, pEFlags, 3); 4990 4990 4991 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);4992 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);4991 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 4992 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 4993 4993 IEM_MC_REF_EFLAGS(pEFlags); 4994 4994 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags); … … 5005 5005 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5006 5006 5007 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5008 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5007 
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5008 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5009 5009 IEM_MC_REF_EFLAGS(pEFlags); 5010 5010 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags); … … 5022 5022 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5023 5023 5024 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5025 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5024 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5025 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5026 5026 IEM_MC_REF_EFLAGS(pEFlags); 5027 5027 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags); … … 5038 5038 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 5039 5039 5040 switch (p IemCpu->enmEffOpSize)5040 switch (pVCpu->iem.s.enmEffOpSize) 5041 5041 { 5042 5042 case IEMMODE_16BIT: … … 5051 5051 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 5052 5052 IEM_MC_ASSIGN(cShiftArg, cShift); 5053 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5053 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5054 5054 IEM_MC_FETCH_EFLAGS(EFlags); 5055 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5055 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5056 5056 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags); 5057 5057 … … 5073 5073 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 5074 5074 IEM_MC_ASSIGN(cShiftArg, cShift); 5075 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5075 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5076 5076 IEM_MC_FETCH_EFLAGS(EFlags); 5077 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5077 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5078 5078 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags); 5079 5079 … … 5095 5095 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 5096 5096 IEM_MC_ASSIGN(cShiftArg, cShift); 5097 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5097 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5098 5098 IEM_MC_FETCH_EFLAGS(EFlags); 5099 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5099 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5100 5100 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags); 5101 5101 … … 5125 5125 IEMOP_HLP_NO_LOCK_PREFIX(); 5126 5126 5127 switch (p IemCpu->enmEffOpSize)5127 switch (pVCpu->iem.s.enmEffOpSize) 5128 5128 { 5129 5129 case IEMMODE_16BIT: … … 5134 5134 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5135 5135 5136 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5137 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5136 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | 
pVCpu->iem.s.uRexReg); 5137 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5138 5138 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5139 5139 IEM_MC_REF_EFLAGS(pEFlags); … … 5151 5151 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5152 5152 5153 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5154 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5153 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5154 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5155 5155 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5156 5156 IEM_MC_REF_EFLAGS(pEFlags); … … 5169 5169 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5170 5170 5171 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5172 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5171 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5172 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5173 5173 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5174 5174 IEM_MC_REF_EFLAGS(pEFlags); … … 5186 5186 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */ 5187 5187 5188 switch (p IemCpu->enmEffOpSize)5188 switch (pVCpu->iem.s.enmEffOpSize) 5189 5189 { 5190 5190 case IEMMODE_16BIT: … … 5197 5197 5198 5198 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5199 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5199 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5200 5200 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5201 5201 IEM_MC_FETCH_EFLAGS(EFlags); 5202 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5202 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5203 5203 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags); 5204 5204 … … 5218 5218 5219 5219 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5220 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5220 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5221 5221 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5222 5222 IEM_MC_FETCH_EFLAGS(EFlags); 5223 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5223 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5224 5224 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags); 5225 5225 … … 5239 5239 5240 5240 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5241 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5241 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5242 5242 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 5243 5243 IEM_MC_FETCH_EFLAGS(EFlags); 5244 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5244 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5245 5245 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags); 5246 5246 … … 5292 5292 IEMOP_HLP_MIN_386(); 5293 5293 IEMOP_HLP_NO_LOCK_PREFIX(); 5294 return 
IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, p IemCpu->enmEffOpSize);5294 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pVCpu->iem.s.enmEffOpSize); 5295 5295 } 5296 5296 … … 5332 5332 { 5333 5333 IEMOP_MNEMONIC("fxsave m512"); 5334 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fFxSaveRstor)5334 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFxSaveRstor) 5335 5335 return IEMOP_RAISE_INVALID_OPCODE(); 5336 5336 … … 5338 5338 IEM_MC_ARG(uint8_t, iEffSeg, 0); 5339 5339 IEM_MC_ARG(RTGCPTR, GCPtrEff, 1); 5340 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/p IemCpu->enmEffOpSize, 2);5340 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2); 5341 5341 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); 5342 5342 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5343 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);5343 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 5344 5344 IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize); 5345 5345 IEM_MC_END(); … … 5352 5352 { 5353 5353 IEMOP_MNEMONIC("fxrstor m512"); 5354 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fFxSaveRstor)5354 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFxSaveRstor) 5355 5355 return IEMOP_RAISE_INVALID_OPCODE(); 5356 5356 … … 5358 5358 IEM_MC_ARG(uint8_t, iEffSeg, 0); 5359 5359 IEM_MC_ARG(RTGCPTR, GCPtrEff, 1); 5360 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/p IemCpu->enmEffOpSize, 2);5360 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2); 5361 5361 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); 5362 5362 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5363 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);5363 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 5364 5364 IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize); 5365 5365 IEM_MC_END(); … … 5392 5392 IEMOP_MNEMONIC("lfence"); 5393 5393 IEMOP_HLP_NO_LOCK_PREFIX(); 5394 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2)5394 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) 5395 5395 return IEMOP_RAISE_INVALID_OPCODE(); 5396 5396 5397 5397 IEM_MC_BEGIN(0, 0); 5398 if (IEM_GET_HOST_CPU_FEATURES(p IemCpu)->fSse2)5398 if (IEM_GET_HOST_CPU_FEATURES(pVCpu)->fSse2) 5399 5399 IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence); 5400 5400 else … … 5411 5411 IEMOP_MNEMONIC("mfence"); 5412 5412 IEMOP_HLP_NO_LOCK_PREFIX(); 5413 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2)5413 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) 5414 5414 return IEMOP_RAISE_INVALID_OPCODE(); 5415 5415 5416 5416 IEM_MC_BEGIN(0, 0); 5417 if (IEM_GET_HOST_CPU_FEATURES(p IemCpu)->fSse2)5417 if (IEM_GET_HOST_CPU_FEATURES(pVCpu)->fSse2) 5418 5418 IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence); 5419 5419 else … … 5430 5430 IEMOP_MNEMONIC("sfence"); 5431 5431 IEMOP_HLP_NO_LOCK_PREFIX(); 5432 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2)5432 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) 5433 5433 return IEMOP_RAISE_INVALID_OPCODE(); 5434 5434 5435 5435 IEM_MC_BEGIN(0, 0); 5436 if (IEM_GET_HOST_CPU_FEATURES(p IemCpu)->fSse2)5436 if (IEM_GET_HOST_CPU_FEATURES(pVCpu)->fSse2) 5437 5437 IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence); 5438 5438 else … … 5479 5479 else 5480 5480 { 5481 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))5481 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK)) 5482 5482 { 5483 5483 case 0: … … 5544 5544 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5545 5545 5546 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p 
IemCpu->uRexReg);5547 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5546 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5547 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5548 5548 IEM_MC_REF_GREG_U8(pu8Al, X86_GREG_xAX); 5549 5549 IEM_MC_REF_EFLAGS(pEFlags); 5550 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5550 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5551 5551 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags); 5552 5552 else … … 5568 5568 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5569 5569 IEMOP_HLP_DONE_DECODING(); 5570 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5571 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5570 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5571 IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5572 5572 IEM_MC_FETCH_GREG_U8(u8Al, X86_GREG_xAX); 5573 5573 IEM_MC_FETCH_EFLAGS(EFlags); 5574 5574 IEM_MC_REF_LOCAL(pu8Al, u8Al); 5575 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5575 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5576 5576 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags); 5577 5577 else … … 5597 5597 { 5598 5598 IEMOP_HLP_DONE_DECODING(); 5599 switch (p IemCpu->enmEffOpSize)5599 switch (pVCpu->iem.s.enmEffOpSize) 5600 5600 { 5601 5601 case IEMMODE_16BIT: … … 5606 5606 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5607 5607 5608 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5609 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5608 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5609 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5610 5610 IEM_MC_REF_GREG_U16(pu16Ax, X86_GREG_xAX); 5611 5611 IEM_MC_REF_EFLAGS(pEFlags); 5612 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5612 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5613 5613 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags); 5614 5614 else … … 5626 5626 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5627 5627 5628 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5629 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5628 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5629 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5630 5630 IEM_MC_REF_GREG_U32(pu32Eax, X86_GREG_xAX); 5631 5631 IEM_MC_REF_EFLAGS(pEFlags); 5632 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5632 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5633 5633 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags); 5634 5634 else … … 5652 5652 IEM_MC_ARG(uint32_t *, pEFlags, 3); 5653 5653 5654 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5654 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5655 5655 IEM_MC_REF_GREG_U64(pu64Rax, X86_GREG_xAX); 5656 5656 IEM_MC_REF_EFLAGS(pEFlags); 5657 5657 #ifdef RT_ARCH_X86 5658 IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5659 if (!(p IemCpu->fPrefixes & 
IEM_OP_PRF_LOCK))5658 IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5659 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5660 5660 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags); 5661 5661 else 5662 5662 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags); 5663 5663 #else 5664 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5665 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5664 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5665 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5666 5666 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags); 5667 5667 else … … 5678 5678 else 5679 5679 { 5680 switch (p IemCpu->enmEffOpSize)5680 switch (pVCpu->iem.s.enmEffOpSize) 5681 5681 { 5682 5682 case IEMMODE_16BIT: … … 5691 5691 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5692 5692 IEMOP_HLP_DONE_DECODING(); 5693 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5694 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5693 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5694 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5695 5695 IEM_MC_FETCH_GREG_U16(u16Ax, X86_GREG_xAX); 5696 5696 IEM_MC_FETCH_EFLAGS(EFlags); 5697 5697 IEM_MC_REF_LOCAL(pu16Ax, u16Ax); 5698 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5698 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5699 5699 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags); 5700 5700 else … … 5719 5719 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5720 5720 IEMOP_HLP_DONE_DECODING(); 5721 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5722 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5721 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5722 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5723 5723 IEM_MC_FETCH_GREG_U32(u32Eax, X86_GREG_xAX); 5724 5724 IEM_MC_FETCH_EFLAGS(EFlags); 5725 5725 IEM_MC_REF_LOCAL(pu32Eax, u32Eax); 5726 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5726 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5727 5727 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags); 5728 5728 else … … 5751 5751 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5752 5752 IEMOP_HLP_DONE_DECODING(); 5753 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0);5753 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 5754 5754 IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX); 5755 5755 IEM_MC_FETCH_EFLAGS(EFlags); 5756 5756 IEM_MC_REF_LOCAL(pu64Rax, u64Rax); 5757 5757 #ifdef RT_ARCH_X86 5758 IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5759 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5758 IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5759 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5760 5760 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags); 5761 5761 else 5762 5762 
IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags); 5763 5763 #else 5764 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);5765 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))5764 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 5765 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 5766 5766 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags); 5767 5767 else … … 5785 5785 { 5786 5786 Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */ 5787 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg;5788 5789 switch (p IemCpu->enmEffOpSize)5787 uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg; 5788 5789 switch (pVCpu->iem.s.enmEffOpSize) 5790 5790 { 5791 5791 case IEMMODE_16BIT: … … 5795 5795 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2); 5796 5796 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); 5797 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/p IemCpu->enmEffOpSize, 4);5797 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4); 5798 5798 IEM_MC_LOCAL(RTGCPTR, GCPtrEff); 5799 5799 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); 5800 5800 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5801 IEM_MC_FETCH_MEM_U16(offSeg, p IemCpu->iEffSeg, GCPtrEff);5802 IEM_MC_FETCH_MEM_U16_DISP(uSel, p IemCpu->iEffSeg, GCPtrEff, 2);5801 IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); 5802 IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 2); 5803 5803 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); 5804 5804 IEM_MC_END(); … … 5811 5811 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2); 5812 5812 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); 5813 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/p IemCpu->enmEffOpSize, 4);5813 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4); 5814 5814 IEM_MC_LOCAL(RTGCPTR, GCPtrEff); 5815 5815 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); 5816 5816 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5817 IEM_MC_FETCH_MEM_U32(offSeg, p IemCpu->iEffSeg, GCPtrEff);5818 IEM_MC_FETCH_MEM_U16_DISP(uSel, p IemCpu->iEffSeg, GCPtrEff, 4);5817 IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); 5818 IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 4); 5819 5819 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); 5820 5820 IEM_MC_END(); … … 5827 5827 IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2); 5828 5828 IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3); 5829 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/p IemCpu->enmEffOpSize, 4);5829 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 4); 5830 5830 IEM_MC_LOCAL(RTGCPTR, GCPtrEff); 5831 5831 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0); 5832 5832 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5833 if (IEM_IS_GUEST_CPU_AMD(p IemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */5834 IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, p IemCpu->iEffSeg, GCPtrEff);5833 if (IEM_IS_GUEST_CPU_AMD(pVCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. 
*/ 5834 IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); 5835 5835 else 5836 IEM_MC_FETCH_MEM_U64(offSeg, p IemCpu->iEffSeg, GCPtrEff);5837 IEM_MC_FETCH_MEM_U16_DISP(uSel, p IemCpu->iEffSeg, GCPtrEff, 8);5836 IEM_MC_FETCH_MEM_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEff); 5837 IEM_MC_FETCH_MEM_U16_DISP(uSel, pVCpu->iem.s.iEffSeg, GCPtrEff, 8); 5838 5838 IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize); 5839 5839 IEM_MC_END(); … … 5903 5903 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 5904 5904 { 5905 switch (p IemCpu->enmEffOpSize)5905 switch (pVCpu->iem.s.enmEffOpSize) 5906 5906 { 5907 5907 case IEMMODE_16BIT: 5908 5908 IEM_MC_BEGIN(0, 1); 5909 5909 IEM_MC_LOCAL(uint16_t, u16Value); 5910 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5911 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);5910 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5911 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 5912 5912 IEM_MC_ADVANCE_RIP(); 5913 5913 IEM_MC_END(); … … 5917 5917 IEM_MC_BEGIN(0, 1); 5918 5918 IEM_MC_LOCAL(uint32_t, u32Value); 5919 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5920 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);5919 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5920 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 5921 5921 IEM_MC_ADVANCE_RIP(); 5922 5922 IEM_MC_END(); … … 5926 5926 IEM_MC_BEGIN(0, 1); 5927 5927 IEM_MC_LOCAL(uint64_t, u64Value); 5928 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);5929 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);5928 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 5929 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 5930 5930 IEM_MC_ADVANCE_RIP(); 5931 5931 IEM_MC_END(); … … 5940 5940 * We're loading a register from memory. 
5941 5941 */ 5942 switch (p IemCpu->enmEffOpSize)5942 switch (pVCpu->iem.s.enmEffOpSize) 5943 5943 { 5944 5944 case IEMMODE_16BIT: … … 5947 5947 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 5948 5948 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5949 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, p IemCpu->iEffSeg, GCPtrEffDst);5950 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);5949 IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 5950 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 5951 5951 IEM_MC_ADVANCE_RIP(); 5952 5952 IEM_MC_END(); … … 5958 5958 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 5959 5959 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5960 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);5961 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);5960 IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 5961 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 5962 5962 IEM_MC_ADVANCE_RIP(); 5963 5963 IEM_MC_END(); … … 5969 5969 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 5970 5970 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 5971 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);5972 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);5971 IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 5972 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 5973 5973 IEM_MC_ADVANCE_RIP(); 5974 5974 IEM_MC_END(); … … 5998 5998 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 5999 5999 { 6000 if (p IemCpu->enmEffOpSize != IEMMODE_64BIT)6000 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_64BIT) 6001 6001 { 6002 6002 IEM_MC_BEGIN(0, 1); 6003 6003 IEM_MC_LOCAL(uint32_t, u32Value); 6004 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6005 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6004 IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6005 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6006 6006 IEM_MC_ADVANCE_RIP(); 6007 6007 IEM_MC_END(); … … 6011 6011 IEM_MC_BEGIN(0, 1); 6012 6012 IEM_MC_LOCAL(uint64_t, u64Value); 6013 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6014 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6013 IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6014 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6015 6015 IEM_MC_ADVANCE_RIP(); 6016 6016 IEM_MC_END(); … … 6022 6022 * We're loading a register from memory. 
6023 6023 */ 6024 if (p IemCpu->enmEffOpSize != IEMMODE_64BIT)6024 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_64BIT) 6025 6025 { 6026 6026 IEM_MC_BEGIN(0, 2); … … 6028 6028 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6029 6029 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6030 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);6031 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6030 IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6031 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6032 6032 IEM_MC_ADVANCE_RIP(); 6033 6033 IEM_MC_END(); … … 6039 6039 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6040 6040 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6041 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);6042 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6041 IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6042 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6043 6043 IEM_MC_ADVANCE_RIP(); 6044 6044 IEM_MC_END(); … … 6085 6085 IEMOP_HLP_NO_LOCK_PREFIX(); 6086 6086 6087 switch (p IemCpu->enmEffOpSize)6087 switch (pVCpu->iem.s.enmEffOpSize) 6088 6088 { 6089 6089 case IEMMODE_16BIT: … … 6093 6093 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6094 6094 6095 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6095 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6096 6096 IEM_MC_REF_EFLAGS(pEFlags); 6097 6097 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 6107 6107 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6108 6108 6109 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6109 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6110 6110 IEM_MC_REF_EFLAGS(pEFlags); 6111 6111 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 6122 6122 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6123 6123 6124 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6124 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6125 6125 IEM_MC_REF_EFLAGS(pEFlags); 6126 6126 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 6147 6147 6148 6148 /** @todo test negative bit offsets! 
*/ 6149 switch (p IemCpu->enmEffOpSize)6149 switch (pVCpu->iem.s.enmEffOpSize) 6150 6150 { 6151 6151 case IEMMODE_16BIT: … … 6160 6160 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f); 6161 6161 IEM_MC_FETCH_EFLAGS(EFlags); 6162 IEM_MC_MEM_MAP(pu16Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0);6163 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6162 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 6163 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6164 6164 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 6165 6165 else … … 6183 6183 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f); 6184 6184 IEM_MC_FETCH_EFLAGS(EFlags); 6185 IEM_MC_MEM_MAP(pu32Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0);6186 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6185 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 6186 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6187 6187 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 6188 6188 else … … 6206 6206 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f); 6207 6207 IEM_MC_FETCH_EFLAGS(EFlags); 6208 IEM_MC_MEM_MAP(pu64Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0);6209 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6208 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); 6209 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6210 6210 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 6211 6211 else … … 6268 6268 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 6269 6269 { 6270 switch (p IemCpu->enmEffOpSize)6270 switch (pVCpu->iem.s.enmEffOpSize) 6271 6271 { 6272 6272 case IEMMODE_16BIT: 6273 6273 IEM_MC_BEGIN(0, 1); 6274 6274 IEM_MC_LOCAL(uint16_t, u16Value); 6275 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6276 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);6275 IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6276 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 6277 6277 IEM_MC_ADVANCE_RIP(); 6278 6278 IEM_MC_END(); … … 6282 6282 IEM_MC_BEGIN(0, 1); 6283 6283 IEM_MC_LOCAL(uint32_t, u32Value); 6284 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6285 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6284 IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6285 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6286 6286 IEM_MC_ADVANCE_RIP(); 6287 6287 IEM_MC_END(); … … 6291 6291 IEM_MC_BEGIN(0, 1); 6292 6292 IEM_MC_LOCAL(uint64_t, u64Value); 6293 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6294 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6293 IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6294 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6295 6295 IEM_MC_ADVANCE_RIP(); 6296 6296 IEM_MC_END(); … … 6305 6305 * We're loading a register from memory. 
6306 6306 */ 6307 switch (p IemCpu->enmEffOpSize)6307 switch (pVCpu->iem.s.enmEffOpSize) 6308 6308 { 6309 6309 case IEMMODE_16BIT: … … 6312 6312 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6313 6313 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6314 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, p IemCpu->iEffSeg, GCPtrEffDst);6315 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);6314 IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6315 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 6316 6316 IEM_MC_ADVANCE_RIP(); 6317 6317 IEM_MC_END(); … … 6323 6323 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6324 6324 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6325 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);6326 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6325 IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6326 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6327 6327 IEM_MC_ADVANCE_RIP(); 6328 6328 IEM_MC_END(); … … 6334 6334 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6335 6335 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6336 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);6337 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6336 IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6337 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6338 6338 IEM_MC_ADVANCE_RIP(); 6339 6339 IEM_MC_END(); … … 6363 6363 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 6364 6364 { 6365 if (p IemCpu->enmEffOpSize != IEMMODE_64BIT)6365 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_64BIT) 6366 6366 { 6367 6367 IEM_MC_BEGIN(0, 1); 6368 6368 IEM_MC_LOCAL(uint32_t, u32Value); 6369 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6370 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6369 IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6370 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6371 6371 IEM_MC_ADVANCE_RIP(); 6372 6372 IEM_MC_END(); … … 6376 6376 IEM_MC_BEGIN(0, 1); 6377 6377 IEM_MC_LOCAL(uint64_t, u64Value); 6378 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6379 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6378 IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6379 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6380 6380 IEM_MC_ADVANCE_RIP(); 6381 6381 IEM_MC_END(); … … 6387 6387 * We're loading a register from memory. 
6388 6388 */ 6389 if (p IemCpu->enmEffOpSize != IEMMODE_64BIT)6389 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_64BIT) 6390 6390 { 6391 6391 IEM_MC_BEGIN(0, 2); … … 6393 6393 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6394 6394 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6395 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);6396 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);6395 IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6396 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 6397 6397 IEM_MC_ADVANCE_RIP(); 6398 6398 IEM_MC_END(); … … 6404 6404 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 6405 6405 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6406 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);6407 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);6406 IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 6407 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 6408 6408 IEM_MC_ADVANCE_RIP(); 6409 6409 IEM_MC_END(); … … 6433 6433 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6434 6434 6435 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6436 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6435 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6436 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6437 6437 IEM_MC_REF_EFLAGS(pEFlags); 6438 6438 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags); … … 6454 6454 6455 6455 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6456 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);6457 IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6456 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 6457 IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6458 6458 IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy); 6459 6459 IEM_MC_FETCH_EFLAGS(EFlags); 6460 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6460 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6461 6461 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags); 6462 6462 else … … 6465 6465 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW); 6466 6466 IEM_MC_COMMIT_EFLAGS(EFlags); 6467 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u8RegCopy);6467 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u8RegCopy); 6468 6468 IEM_MC_ADVANCE_RIP(); 6469 6469 IEM_MC_END(); … … 6488 6488 IEMOP_HLP_NO_LOCK_PREFIX(); 6489 6489 6490 switch (p IemCpu->enmEffOpSize)6490 switch (pVCpu->iem.s.enmEffOpSize) 6491 6491 { 6492 6492 case IEMMODE_16BIT: … … 6496 6496 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6497 6497 6498 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6499 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6498 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6499 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | 
pVCpu->iem.s.uRexReg); 6500 6500 IEM_MC_REF_EFLAGS(pEFlags); 6501 6501 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags); … … 6511 6511 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6512 6512 6513 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6514 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6513 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6514 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6515 6515 IEM_MC_REF_EFLAGS(pEFlags); 6516 6516 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags); … … 6528 6528 IEM_MC_ARG(uint32_t *, pEFlags, 2); 6529 6529 6530 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6531 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6530 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6531 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6532 6532 IEM_MC_REF_EFLAGS(pEFlags); 6533 6533 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags); … … 6545 6545 * We're accessing memory. 6546 6546 */ 6547 switch (p IemCpu->enmEffOpSize)6547 switch (pVCpu->iem.s.enmEffOpSize) 6548 6548 { 6549 6549 case IEMMODE_16BIT: … … 6556 6556 6557 6557 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6558 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);6559 IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6558 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 6559 IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6560 6560 IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy); 6561 6561 IEM_MC_FETCH_EFLAGS(EFlags); 6562 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6562 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6563 6563 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags); 6564 6564 else … … 6567 6567 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW); 6568 6568 IEM_MC_COMMIT_EFLAGS(EFlags); 6569 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16RegCopy);6569 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16RegCopy); 6570 6570 IEM_MC_ADVANCE_RIP(); 6571 6571 IEM_MC_END(); … … 6581 6581 6582 6582 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6583 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);6584 IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6583 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 6584 IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6585 6585 IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy); 6586 6586 IEM_MC_FETCH_EFLAGS(EFlags); 6587 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6587 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6588 6588 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags); 6589 6589 else … … 6592 6592 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW); 6593 6593 IEM_MC_COMMIT_EFLAGS(EFlags); 6594 IEM_MC_STORE_GREG_U32(((bRm >> 
X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32RegCopy);6594 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32RegCopy); 6595 6595 IEM_MC_ADVANCE_RIP(); 6596 6596 IEM_MC_END(); … … 6606 6606 6607 6607 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6608 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);6609 IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6608 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 6609 IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6610 6610 IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy); 6611 6611 IEM_MC_FETCH_EFLAGS(EFlags); 6612 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6612 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6613 6613 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags); 6614 6614 else … … 6617 6617 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW); 6618 6618 IEM_MC_COMMIT_EFLAGS(EFlags); 6619 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64RegCopy);6619 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64RegCopy); 6620 6620 IEM_MC_ADVANCE_RIP(); 6621 6621 IEM_MC_END(); … … 6641 6641 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) 6642 6642 { 6643 switch (p IemCpu->enmEffOpSize)6643 switch (pVCpu->iem.s.enmEffOpSize) 6644 6644 { 6645 6645 case IEMMODE_32BIT: … … 6650 6650 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6651 6651 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 6652 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2)6652 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) 6653 6653 return IEMOP_RAISE_INVALID_OPCODE(); 6654 6654 6655 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6656 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrEffDst, u32Value);6655 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6656 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value); 6657 6657 IEM_MC_ADVANCE_RIP(); 6658 6658 IEM_MC_END(); … … 6666 6666 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6667 6667 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 6668 if (!IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fSse2)6668 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) 6669 6669 return IEMOP_RAISE_INVALID_OPCODE(); 6670 6670 6671 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6672 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffDst, u64Value);6671 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6672 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u64Value); 6673 6673 IEM_MC_ADVANCE_RIP(); 6674 6674 IEM_MC_END(); … … 6712 6712 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 6713 6713 IEMOP_HLP_DONE_DECODING(); 6714 IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);6714 IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 6715 6715 6716 6716 IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Lo, X86_GREG_xAX); … … 6723 6723 6724 6724 IEM_MC_FETCH_EFLAGS(EFlags); 6725 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))6725 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 6726 
6726 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags); 6727 6727 else … … 6773 6773 /** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */ 6774 6774 if ( (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) 6775 || (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */6775 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */ 6776 6776 return IEMOP_RAISE_INVALID_OPCODE(); 6777 6777 if (bRm & IEM_OP_PRF_SIZE_REX_W) … … 6781 6781 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 6782 6782 return FNIEMOP_CALL_1(iemOp_Grp9_rdrand_Rv, bRm); 6783 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))6783 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ)) 6784 6784 { 6785 6785 case 0: … … 6793 6793 } 6794 6794 case 7: 6795 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))6795 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ)) 6796 6796 { 6797 6797 case 0: … … 6812 6812 { 6813 6813 IEMOP_HLP_NO_LOCK_PREFIX(); 6814 switch (p IemCpu->enmEffOpSize)6814 switch (pVCpu->iem.s.enmEffOpSize) 6815 6815 { 6816 6816 case IEMMODE_16BIT: … … 6855 6855 case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */ 6856 6856 IEMOP_HLP_MIN_486(); 6857 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | p IemCpu->uRexB);6857 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pVCpu->iem.s.uRexB); 6858 6858 } 6859 6859 … … 6864 6864 IEMOP_MNEMONIC("bswap rCX/r9"); 6865 6865 IEMOP_HLP_MIN_486(); 6866 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | p IemCpu->uRexB);6866 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pVCpu->iem.s.uRexB); 6867 6867 } 6868 6868 … … 6873 6873 IEMOP_MNEMONIC("bswap rDX/r9"); 6874 6874 IEMOP_HLP_MIN_486(); 6875 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | p IemCpu->uRexB);6875 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pVCpu->iem.s.uRexB); 6876 6876 } 6877 6877 … … 6882 6882 IEMOP_MNEMONIC("bswap rBX/r9"); 6883 6883 IEMOP_HLP_MIN_486(); 6884 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | p IemCpu->uRexB);6884 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pVCpu->iem.s.uRexB); 6885 6885 } 6886 6886 … … 6891 6891 IEMOP_MNEMONIC("bswap rSP/r12"); 6892 6892 IEMOP_HLP_MIN_486(); 6893 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | p IemCpu->uRexB);6893 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pVCpu->iem.s.uRexB); 6894 6894 } 6895 6895 … … 6900 6900 IEMOP_MNEMONIC("bswap rBP/r13"); 6901 6901 IEMOP_HLP_MIN_486(); 6902 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | p IemCpu->uRexB);6902 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pVCpu->iem.s.uRexB); 6903 6903 } 6904 6904 … … 6909 6909 IEMOP_MNEMONIC("bswap rSI/r14"); 6910 6910 IEMOP_HLP_MIN_486(); 6911 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | p IemCpu->uRexB);6911 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pVCpu->iem.s.uRexB); 6912 6912 } 6913 6913 … … 6918 6918 IEMOP_MNEMONIC("bswap rDI/r15"); 6919 6919 IEMOP_HLP_MIN_486(); 6920 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | p IemCpu->uRexB);6920 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pVCpu->iem.s.uRexB); 6921 6921 } 6922 6922 … … 6952 6952 * and opcode modifications are made 
to work with the whole width (not 6953 6953 * just 128). */ 6954 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))6954 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 6955 6955 { 6956 6956 case IEM_OP_PRF_SIZE_OP: /* SSE */ … … 6962 6962 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 6963 6963 IEM_MC_PREPARE_SSE_USAGE(); 6964 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);6965 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);6964 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 6965 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 6966 6966 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc); 6967 6967 IEM_MC_ADVANCE_RIP(); … … 7025 7025 FNIEMOP_DEF(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq) 7026 7026 { 7027 IEMOP_MNEMONIC(!(p IemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movntq mr,r" : "movntdq mr,r");7027 IEMOP_MNEMONIC(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movntq mr,r" : "movntdq mr,r"); 7028 7028 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 7029 7029 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) … … 7033 7033 */ 7034 7034 /** @todo check when the REPNZ/Z bits kick in. Same as lock, probably... */ 7035 switch (p IemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))7035 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 7036 7036 { 7037 7037 … … 7046 7046 IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); 7047 7047 7048 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);7049 IEM_MC_STORE_MEM_U128_ALIGN_SSE(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);7048 IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 7049 IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 7050 7050 7051 7051 IEM_MC_ADVANCE_RIP(); … … 7064 7064 7065 7065 IEM_MC_FETCH_MREG_U64(uSrc, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 7066 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffSrc, uSrc);7066 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc); 7067 7067 7068 7068 IEM_MC_ADVANCE_RIP(); … … 7467 7467 IEMOP_HLP_NO_64BIT(); 7468 7468 IEMOP_HLP_NO_LOCK_PREFIX(); 7469 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, p IemCpu->enmEffOpSize);7469 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pVCpu->iem.s.enmEffOpSize); 7470 7470 } 7471 7471 … … 7604 7604 IEMOP_HLP_NO_LOCK_PREFIX(); 7605 7605 IEMOP_HLP_NO_64BIT(); 7606 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, p IemCpu->enmEffOpSize);7606 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pVCpu->iem.s.enmEffOpSize); 7607 7607 } 7608 7608 … … 7670 7670 IEMOP_HLP_NO_LOCK_PREFIX(); 7671 7671 IEMOP_HLP_NO_64BIT(); 7672 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, p IemCpu->enmEffOpSize);7672 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pVCpu->iem.s.enmEffOpSize); 7673 7673 } 7674 7674 … … 7732 7732 { 7733 7733 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es"); 7734 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;7735 p IemCpu->iEffSeg = X86_SREG_ES;7734 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_ES; 7735 pVCpu->iem.s.iEffSeg = X86_SREG_ES; 7736 7736 7737 7737 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 7803 7803 { 7804 7804 
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs"); 7805 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;7806 p IemCpu->iEffSeg = X86_SREG_CS;7805 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_CS; 7806 pVCpu->iem.s.iEffSeg = X86_SREG_CS; 7807 7807 7808 7808 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 7880 7880 { 7881 7881 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss"); 7882 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;7883 p IemCpu->iEffSeg = X86_SREG_SS;7882 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_SS; 7883 pVCpu->iem.s.iEffSeg = X86_SREG_SS; 7884 7884 7885 7885 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 7946 7946 { 7947 7947 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds"); 7948 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;7949 p IemCpu->iEffSeg = X86_SREG_DS;7948 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_DS; 7949 pVCpu->iem.s.iEffSeg = X86_SREG_DS; 7950 7950 7951 7951 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 7963 7963 { 7964 7964 IEMOP_HLP_NO_LOCK_PREFIX(); 7965 switch (p IemCpu->enmEffOpSize)7965 switch (pVCpu->iem.s.enmEffOpSize) 7966 7966 { 7967 7967 case IEMMODE_16BIT: … … 8009 8009 * This is a REX prefix in 64-bit mode. 8010 8010 */ 8011 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8011 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8012 8012 { 8013 8013 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex"); 8014 p IemCpu->fPrefixes |= IEM_OP_PRF_REX;8014 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX; 8015 8015 8016 8016 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8029 8029 * This is a REX prefix in 64-bit mode. 8030 8030 */ 8031 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8031 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8032 8032 { 8033 8033 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b"); 8034 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;8035 p IemCpu->uRexB = 1 << 3;8034 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B; 8035 pVCpu->iem.s.uRexB = 1 << 3; 8036 8036 8037 8037 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8050 8050 * This is a REX prefix in 64-bit mode. 8051 8051 */ 8052 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8052 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8053 8053 { 8054 8054 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x"); 8055 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;8056 p IemCpu->uRexIndex = 1 << 3;8055 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X; 8056 pVCpu->iem.s.uRexIndex = 1 << 3; 8057 8057 8058 8058 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8072 8072 * This is a REX prefix in 64-bit mode. 8073 8073 */ 8074 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8074 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8075 8075 { 8076 8076 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx"); 8077 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;8078 p IemCpu->uRexB = 1 << 3;8079 p IemCpu->uRexIndex = 1 << 3;8077 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X; 8078 pVCpu->iem.s.uRexB = 1 << 3; 8079 pVCpu->iem.s.uRexIndex = 1 << 3; 8080 8080 8081 8081 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8094 8094 * This is a REX prefix in 64-bit mode. 
8095 8095 */ 8096 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8096 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8097 8097 { 8098 8098 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r"); 8099 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;8100 p IemCpu->uRexReg = 1 << 3;8099 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R; 8100 pVCpu->iem.s.uRexReg = 1 << 3; 8101 8101 8102 8102 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8115 8115 * This is a REX prefix in 64-bit mode. 8116 8116 */ 8117 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8117 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8118 8118 { 8119 8119 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb"); 8120 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;8121 p IemCpu->uRexReg = 1 << 3;8122 p IemCpu->uRexB = 1 << 3;8120 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B; 8121 pVCpu->iem.s.uRexReg = 1 << 3; 8122 pVCpu->iem.s.uRexB = 1 << 3; 8123 8123 8124 8124 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8137 8137 * This is a REX prefix in 64-bit mode. 8138 8138 */ 8139 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8139 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8140 8140 { 8141 8141 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx"); 8142 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;8143 p IemCpu->uRexReg = 1 << 3;8144 p IemCpu->uRexIndex = 1 << 3;8142 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X; 8143 pVCpu->iem.s.uRexReg = 1 << 3; 8144 pVCpu->iem.s.uRexIndex = 1 << 3; 8145 8145 8146 8146 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8159 8159 * This is a REX prefix in 64-bit mode. 8160 8160 */ 8161 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8161 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8162 8162 { 8163 8163 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx"); 8164 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;8165 p IemCpu->uRexReg = 1 << 3;8166 p IemCpu->uRexB = 1 << 3;8167 p IemCpu->uRexIndex = 1 << 3;8164 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X; 8165 pVCpu->iem.s.uRexReg = 1 << 3; 8166 pVCpu->iem.s.uRexB = 1 << 3; 8167 pVCpu->iem.s.uRexIndex = 1 << 3; 8168 8168 8169 8169 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8182 8182 * This is a REX prefix in 64-bit mode. 8183 8183 */ 8184 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8184 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8185 8185 { 8186 8186 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w"); 8187 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;8188 iemRecalEffOpSize(p IemCpu);8187 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W; 8188 iemRecalEffOpSize(pVCpu); 8189 8189 8190 8190 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8203 8203 * This is a REX prefix in 64-bit mode. 8204 8204 */ 8205 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8205 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8206 8206 { 8207 8207 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw"); 8208 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;8209 p IemCpu->uRexB = 1 << 3;8210 iemRecalEffOpSize(p IemCpu);8208 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W; 8209 pVCpu->iem.s.uRexB = 1 << 3; 8210 iemRecalEffOpSize(pVCpu); 8211 8211 8212 8212 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8225 8225 * This is a REX prefix in 64-bit mode. 
8226 8226 */ 8227 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8227 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8228 8228 { 8229 8229 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw"); 8230 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;8231 p IemCpu->uRexIndex = 1 << 3;8232 iemRecalEffOpSize(p IemCpu);8230 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; 8231 pVCpu->iem.s.uRexIndex = 1 << 3; 8232 iemRecalEffOpSize(pVCpu); 8233 8233 8234 8234 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8247 8247 * This is a REX prefix in 64-bit mode. 8248 8248 */ 8249 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8249 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8250 8250 { 8251 8251 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw"); 8252 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;8253 p IemCpu->uRexB = 1 << 3;8254 p IemCpu->uRexIndex = 1 << 3;8255 iemRecalEffOpSize(p IemCpu);8252 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; 8253 pVCpu->iem.s.uRexB = 1 << 3; 8254 pVCpu->iem.s.uRexIndex = 1 << 3; 8255 iemRecalEffOpSize(pVCpu); 8256 8256 8257 8257 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8270 8270 * This is a REX prefix in 64-bit mode. 8271 8271 */ 8272 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8272 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8273 8273 { 8274 8274 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw"); 8275 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;8276 p IemCpu->uRexReg = 1 << 3;8277 iemRecalEffOpSize(p IemCpu);8275 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W; 8276 pVCpu->iem.s.uRexReg = 1 << 3; 8277 iemRecalEffOpSize(pVCpu); 8278 8278 8279 8279 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8292 8292 * This is a REX prefix in 64-bit mode. 8293 8293 */ 8294 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8294 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8295 8295 { 8296 8296 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw"); 8297 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;8298 p IemCpu->uRexReg = 1 << 3;8299 p IemCpu->uRexB = 1 << 3;8300 iemRecalEffOpSize(p IemCpu);8297 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W; 8298 pVCpu->iem.s.uRexReg = 1 << 3; 8299 pVCpu->iem.s.uRexB = 1 << 3; 8300 iemRecalEffOpSize(pVCpu); 8301 8301 8302 8302 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8315 8315 * This is a REX prefix in 64-bit mode. 8316 8316 */ 8317 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8317 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8318 8318 { 8319 8319 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw"); 8320 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;8321 p IemCpu->uRexReg = 1 << 3;8322 p IemCpu->uRexIndex = 1 << 3;8323 iemRecalEffOpSize(p IemCpu);8320 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; 8321 pVCpu->iem.s.uRexReg = 1 << 3; 8322 pVCpu->iem.s.uRexIndex = 1 << 3; 8323 iemRecalEffOpSize(pVCpu); 8324 8324 8325 8325 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8338 8338 * This is a REX prefix in 64-bit mode. 
8339 8339 */ 8340 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8340 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8341 8341 { 8342 8342 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw"); 8343 p IemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;8344 p IemCpu->uRexReg = 1 << 3;8345 p IemCpu->uRexB = 1 << 3;8346 p IemCpu->uRexIndex = 1 << 3;8347 iemRecalEffOpSize(p IemCpu);8343 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W; 8344 pVCpu->iem.s.uRexReg = 1 << 3; 8345 pVCpu->iem.s.uRexB = 1 << 3; 8346 pVCpu->iem.s.uRexIndex = 1 << 3; 8347 iemRecalEffOpSize(pVCpu); 8348 8348 8349 8349 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8362 8362 { 8363 8363 IEMOP_HLP_NO_LOCK_PREFIX(); 8364 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8365 { 8366 iReg |= p IemCpu->uRexB;8367 p IemCpu->enmDefOpSize = IEMMODE_64BIT;8368 p IemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;8369 } 8370 8371 switch (p IemCpu->enmEffOpSize)8364 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8365 { 8366 iReg |= pVCpu->iem.s.uRexB; 8367 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 8368 pVCpu->iem.s.enmEffOpSize = !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT; 8369 } 8370 8371 switch (pVCpu->iem.s.enmEffOpSize) 8372 8372 { 8373 8373 case IEMMODE_16BIT: … … 8439 8439 { 8440 8440 IEMOP_MNEMONIC("push rSP"); 8441 if (IEM_GET_TARGET_CPU(p IemCpu) == IEMTARGETCPU_8086)8441 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_8086) 8442 8442 { 8443 8443 IEM_MC_BEGIN(0, 1); … … 8483 8483 { 8484 8484 IEMOP_HLP_NO_LOCK_PREFIX(); 8485 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8486 { 8487 iReg |= p IemCpu->uRexB;8488 p IemCpu->enmDefOpSize = IEMMODE_64BIT;8489 p IemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;8490 } 8491 8492 switch (p IemCpu->enmEffOpSize)8485 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8486 { 8487 iReg |= pVCpu->iem.s.uRexB; 8488 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 8489 pVCpu->iem.s.enmEffOpSize = !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT; 8490 } 8491 8492 switch (pVCpu->iem.s.enmEffOpSize) 8493 8493 { 8494 8494 case IEMMODE_16BIT: … … 8561 8561 { 8562 8562 IEMOP_MNEMONIC("pop rSP"); 8563 if (p IemCpu->enmCpuMode == IEMMODE_64BIT)8564 { 8565 if (p IemCpu->uRexB)8563 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 8564 { 8565 if (pVCpu->iem.s.uRexB) 8566 8566 return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP); 8567 p IemCpu->enmDefOpSize = IEMMODE_64BIT;8568 p IemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;8567 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; 8568 pVCpu->iem.s.enmEffOpSize = !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT; 8569 8569 } 8570 8570 … … 8572 8572 DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG); 8573 8573 /** @todo add testcase for this instruction. 
*/ 8574 switch (p IemCpu->enmEffOpSize)8574 switch (pVCpu->iem.s.enmEffOpSize) 8575 8575 { 8576 8576 case IEMMODE_16BIT: … … 8636 8636 IEMOP_HLP_MIN_186(); 8637 8637 IEMOP_HLP_NO_64BIT(); 8638 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)8638 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 8639 8639 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16); 8640 Assert(p IemCpu->enmEffOpSize == IEMMODE_32BIT);8640 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT); 8641 8641 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32); 8642 8642 } … … 8649 8649 IEMOP_HLP_MIN_186(); 8650 8650 IEMOP_HLP_NO_64BIT(); 8651 if (p IemCpu->enmEffOpSize == IEMMODE_16BIT)8651 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT) 8652 8652 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16); 8653 Assert(p IemCpu->enmEffOpSize == IEMMODE_32BIT);8653 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT); 8654 8654 return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32); 8655 8655 } … … 8697 8697 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 8698 8698 IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS); 8699 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);8699 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 8700 8700 IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 8701 8701 IEM_MC_FETCH_EFLAGS(EFlags); … … 8718 8718 FNIEMOP_DEF(iemOp_movsxd_Gv_Ev) 8719 8719 { 8720 Assert(p IemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */8720 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */ 8721 8721 8722 8722 IEMOP_MNEMONIC("movsxd Gv,Ev"); … … 8731 8731 IEM_MC_BEGIN(0, 1); 8732 8732 IEM_MC_LOCAL(uint64_t, u64Value); 8733 IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);8734 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);8733 IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8734 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 8735 8735 IEM_MC_ADVANCE_RIP(); 8736 8736 IEM_MC_END(); … … 8746 8746 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 8747 8747 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8748 IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);8749 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);8748 IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8749 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 8750 8750 IEM_MC_ADVANCE_RIP(); 8751 8751 IEM_MC_END(); … … 8761 8761 IEMOP_HLP_MIN_386(); 8762 8762 8763 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;8764 p IemCpu->iEffSeg = X86_SREG_FS;8763 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_FS; 8764 pVCpu->iem.s.iEffSeg = X86_SREG_FS; 8765 8765 8766 8766 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8775 8775 IEMOP_HLP_MIN_386(); 8776 8776 8777 p IemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;8778 p IemCpu->iEffSeg = X86_SREG_GS;8777 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SEG_GS; 8778 pVCpu->iem.s.iEffSeg = X86_SREG_GS; 8779 8779 8780 8780 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8789 8789 IEMOP_HLP_MIN_386(); 8790 8790 8791 p IemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;8792 iemRecalEffOpSize(p IemCpu);8791 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_OP; 8792 
iemRecalEffOpSize(pVCpu); 8793 8793 8794 8794 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 8803 8803 IEMOP_HLP_MIN_386(); 8804 8804 8805 p IemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;8806 switch (p IemCpu->enmDefAddrMode)8807 { 8808 case IEMMODE_16BIT: p IemCpu->enmEffAddrMode = IEMMODE_32BIT; break;8809 case IEMMODE_32BIT: p IemCpu->enmEffAddrMode = IEMMODE_16BIT; break;8810 case IEMMODE_64BIT: p IemCpu->enmEffAddrMode = IEMMODE_32BIT; break;8805 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_ADDR; 8806 switch (pVCpu->iem.s.enmDefAddrMode) 8807 { 8808 case IEMMODE_16BIT: pVCpu->iem.s.enmEffAddrMode = IEMMODE_32BIT; break; 8809 case IEMMODE_32BIT: pVCpu->iem.s.enmEffAddrMode = IEMMODE_16BIT; break; 8810 case IEMMODE_64BIT: pVCpu->iem.s.enmEffAddrMode = IEMMODE_32BIT; break; 8811 8811 default: AssertFailed(); 8812 8812 } … … 8823 8823 IEMOP_HLP_MIN_186(); 8824 8824 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 8825 switch (p IemCpu->enmEffOpSize)8825 switch (pVCpu->iem.s.enmEffOpSize) 8826 8826 { 8827 8827 case IEMMODE_16BIT: … … 8871 8871 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF); 8872 8872 8873 switch (p IemCpu->enmEffOpSize)8873 switch (pVCpu->iem.s.enmEffOpSize) 8874 8874 { 8875 8875 case IEMMODE_16BIT: … … 8887 8887 IEM_MC_LOCAL(uint16_t, u16Tmp); 8888 8888 8889 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);8889 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8890 8890 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp); 8891 8891 IEM_MC_REF_EFLAGS(pEFlags); 8892 8892 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags); 8893 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Tmp);8893 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); 8894 8894 8895 8895 IEM_MC_ADVANCE_RIP(); … … 8910 8910 IEM_MC_ASSIGN(u16Src, u16Imm); 8911 8911 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8912 IEM_MC_FETCH_MEM_U16(u16Tmp, p IemCpu->iEffSeg, GCPtrEffDst);8912 IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8913 8913 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp); 8914 8914 IEM_MC_REF_EFLAGS(pEFlags); 8915 8915 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags); 8916 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Tmp);8916 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); 8917 8917 8918 8918 IEM_MC_ADVANCE_RIP(); … … 8936 8936 IEM_MC_LOCAL(uint32_t, u32Tmp); 8937 8937 8938 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);8938 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8939 8939 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp); 8940 8940 IEM_MC_REF_EFLAGS(pEFlags); 8941 8941 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags); 8942 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);8942 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 8943 8943 8944 8944 IEM_MC_ADVANCE_RIP(); … … 8959 8959 IEM_MC_ASSIGN(u32Src, u32Imm); 8960 8960 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8961 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrEffDst);8961 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8962 8962 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp); 8963 8963 IEM_MC_REF_EFLAGS(pEFlags); 8964 8964 
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags); 8965 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);8965 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 8966 8966 8967 8967 IEM_MC_ADVANCE_RIP(); … … 8985 8985 IEM_MC_LOCAL(uint64_t, u64Tmp); 8986 8986 8987 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);8987 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8988 8988 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp); 8989 8989 IEM_MC_REF_EFLAGS(pEFlags); 8990 8990 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags); 8991 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);8991 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 8992 8992 8993 8993 IEM_MC_ADVANCE_RIP(); … … 9008 9008 IEM_MC_ASSIGN(u64Src, u64Imm); 9009 9009 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 9010 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffDst);9010 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 9011 9011 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp); 9012 9012 IEM_MC_REF_EFLAGS(pEFlags); 9013 9013 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags); 9014 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);9014 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 9015 9015 9016 9016 IEM_MC_ADVANCE_RIP(); … … 9034 9034 9035 9035 IEM_MC_BEGIN(0,0); 9036 switch (p IemCpu->enmEffOpSize)9036 switch (pVCpu->iem.s.enmEffOpSize) 9037 9037 { 9038 9038 case IEMMODE_16BIT: … … 9060 9060 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF); 9061 9061 9062 switch (p IemCpu->enmEffOpSize)9062 switch (pVCpu->iem.s.enmEffOpSize) 9063 9063 { 9064 9064 case IEMMODE_16BIT: … … 9075 9075 IEM_MC_LOCAL(uint16_t, u16Tmp); 9076 9076 9077 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9077 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9078 9078 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp); 9079 9079 IEM_MC_REF_EFLAGS(pEFlags); 9080 9080 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags); 9081 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Tmp);9081 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); 9082 9082 9083 9083 IEM_MC_ADVANCE_RIP(); … … 9098 9098 IEM_MC_ASSIGN(u16Src, u16Imm); 9099 9099 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 9100 IEM_MC_FETCH_MEM_U16(u16Tmp, p IemCpu->iEffSeg, GCPtrEffDst);9100 IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 9101 9101 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp); 9102 9102 IEM_MC_REF_EFLAGS(pEFlags); 9103 9103 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags); 9104 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Tmp);9104 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp); 9105 9105 9106 9106 IEM_MC_ADVANCE_RIP(); … … 9122 9122 IEM_MC_LOCAL(uint32_t, u32Tmp); 9123 9123 9124 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9124 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & 
X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9125 9125 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp); 9126 9126 IEM_MC_REF_EFLAGS(pEFlags); 9127 9127 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags); 9128 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);9128 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 9129 9129 9130 9130 IEM_MC_ADVANCE_RIP(); … … 9145 9145 IEM_MC_ASSIGN(u32Src, u32Imm); 9146 9146 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 9147 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrEffDst);9147 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 9148 9148 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp); 9149 9149 IEM_MC_REF_EFLAGS(pEFlags); 9150 9150 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags); 9151 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Tmp);9151 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp); 9152 9152 9153 9153 IEM_MC_ADVANCE_RIP(); … … 9169 9169 IEM_MC_LOCAL(uint64_t, u64Tmp); 9170 9170 9171 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9171 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9172 9172 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp); 9173 9173 IEM_MC_REF_EFLAGS(pEFlags); 9174 9174 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags); 9175 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);9175 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 9176 9176 9177 9177 IEM_MC_ADVANCE_RIP(); … … 9192 9192 IEM_MC_ASSIGN(u64Src, u64Imm); 9193 9193 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 9194 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrEffDst);9194 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 9195 9195 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp); 9196 9196 IEM_MC_REF_EFLAGS(pEFlags); 9197 9197 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags); 9198 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Tmp);9198 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Tmp); 9199 9199 9200 9200 IEM_MC_ADVANCE_RIP(); … … 9212 9212 IEMOP_HLP_MIN_186(); 9213 9213 IEMOP_HLP_NO_LOCK_PREFIX(); 9214 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))9214 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 9215 9215 { 9216 9216 IEMOP_MNEMONIC("rep ins Yb,DX"); 9217 switch (p IemCpu->enmEffAddrMode)9217 switch (pVCpu->iem.s.enmEffAddrMode) 9218 9218 { 9219 9219 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false); … … 9226 9226 { 9227 9227 IEMOP_MNEMONIC("ins Yb,DX"); 9228 switch (p IemCpu->enmEffAddrMode)9228 switch (pVCpu->iem.s.enmEffAddrMode) 9229 9229 { 9230 9230 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false); … … 9242 9242 IEMOP_HLP_MIN_186(); 9243 9243 IEMOP_HLP_NO_LOCK_PREFIX(); 9244 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))9244 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 9245 9245 { 9246 9246 IEMOP_MNEMONIC("rep ins Yv,DX"); 9247 switch (p IemCpu->enmEffOpSize)9247 switch (pVCpu->iem.s.enmEffOpSize) 9248 9248 { 9249 9249 case IEMMODE_16BIT: 9250 switch 
(p IemCpu->enmEffAddrMode)9250 switch (pVCpu->iem.s.enmEffAddrMode) 9251 9251 { 9252 9252 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false); … … 9258 9258 case IEMMODE_64BIT: 9259 9259 case IEMMODE_32BIT: 9260 switch (p IemCpu->enmEffAddrMode)9260 switch (pVCpu->iem.s.enmEffAddrMode) 9261 9261 { 9262 9262 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false); … … 9272 9272 { 9273 9273 IEMOP_MNEMONIC("ins Yv,DX"); 9274 switch (p IemCpu->enmEffOpSize)9274 switch (pVCpu->iem.s.enmEffOpSize) 9275 9275 { 9276 9276 case IEMMODE_16BIT: 9277 switch (p IemCpu->enmEffAddrMode)9277 switch (pVCpu->iem.s.enmEffAddrMode) 9278 9278 { 9279 9279 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false); … … 9285 9285 case IEMMODE_64BIT: 9286 9286 case IEMMODE_32BIT: 9287 switch (p IemCpu->enmEffAddrMode)9287 switch (pVCpu->iem.s.enmEffAddrMode) 9288 9288 { 9289 9289 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false); … … 9304 9304 IEMOP_HLP_MIN_186(); 9305 9305 IEMOP_HLP_NO_LOCK_PREFIX(); 9306 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))9306 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 9307 9307 { 9308 9308 IEMOP_MNEMONIC("rep outs DX,Yb"); 9309 switch (p IemCpu->enmEffAddrMode)9309 switch (pVCpu->iem.s.enmEffAddrMode) 9310 9310 { 9311 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, p IemCpu->iEffSeg, false);9312 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, p IemCpu->iEffSeg, false);9313 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, p IemCpu->iEffSeg, false);9311 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pVCpu->iem.s.iEffSeg, false); 9312 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pVCpu->iem.s.iEffSeg, false); 9313 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pVCpu->iem.s.iEffSeg, false); 9314 9314 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9315 9315 } … … 9318 9318 { 9319 9319 IEMOP_MNEMONIC("outs DX,Yb"); 9320 switch (p IemCpu->enmEffAddrMode)9320 switch (pVCpu->iem.s.enmEffAddrMode) 9321 9321 { 9322 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, p IemCpu->iEffSeg, false);9323 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, p IemCpu->iEffSeg, false);9324 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, p IemCpu->iEffSeg, false);9322 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pVCpu->iem.s.iEffSeg, false); 9323 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pVCpu->iem.s.iEffSeg, false); 9324 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pVCpu->iem.s.iEffSeg, false); 9325 9325 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9326 9326 } … … 9334 9334 IEMOP_HLP_MIN_186(); 9335 9335 IEMOP_HLP_NO_LOCK_PREFIX(); 9336 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))9336 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ)) 9337 9337 { 9338 9338 IEMOP_MNEMONIC("rep outs DX,Yv"); 9339 switch (p IemCpu->enmEffOpSize)9339 switch (pVCpu->iem.s.enmEffOpSize) 9340 9340 { 9341 9341 case IEMMODE_16BIT: 9342 switch (p IemCpu->enmEffAddrMode)9342 switch (pVCpu->iem.s.enmEffAddrMode) 9343 9343 { 9344 case IEMMODE_16BIT: return 
IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, p IemCpu->iEffSeg, false);9345 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, p IemCpu->iEffSeg, false);9346 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, p IemCpu->iEffSeg, false);9344 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pVCpu->iem.s.iEffSeg, false); 9345 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pVCpu->iem.s.iEffSeg, false); 9346 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pVCpu->iem.s.iEffSeg, false); 9347 9347 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9348 9348 } … … 9350 9350 case IEMMODE_64BIT: 9351 9351 case IEMMODE_32BIT: 9352 switch (p IemCpu->enmEffAddrMode)9352 switch (pVCpu->iem.s.enmEffAddrMode) 9353 9353 { 9354 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, p IemCpu->iEffSeg, false);9355 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, p IemCpu->iEffSeg, false);9356 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, p IemCpu->iEffSeg, false);9354 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pVCpu->iem.s.iEffSeg, false); 9355 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pVCpu->iem.s.iEffSeg, false); 9356 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pVCpu->iem.s.iEffSeg, false); 9357 9357 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9358 9358 } … … 9364 9364 { 9365 9365 IEMOP_MNEMONIC("outs DX,Yv"); 9366 switch (p IemCpu->enmEffOpSize)9366 switch (pVCpu->iem.s.enmEffOpSize) 9367 9367 { 9368 9368 case IEMMODE_16BIT: 9369 switch (p IemCpu->enmEffAddrMode)9369 switch (pVCpu->iem.s.enmEffAddrMode) 9370 9370 { 9371 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, p IemCpu->iEffSeg, false);9372 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, p IemCpu->iEffSeg, false);9373 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, p IemCpu->iEffSeg, false);9371 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pVCpu->iem.s.iEffSeg, false); 9372 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pVCpu->iem.s.iEffSeg, false); 9373 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pVCpu->iem.s.iEffSeg, false); 9374 9374 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9375 9375 } … … 9377 9377 case IEMMODE_64BIT: 9378 9378 case IEMMODE_32BIT: 9379 switch (p IemCpu->enmEffAddrMode)9379 switch (pVCpu->iem.s.enmEffAddrMode) 9380 9380 { 9381 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, p IemCpu->iEffSeg, false);9382 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, p IemCpu->iEffSeg, false);9383 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, p IemCpu->iEffSeg, false);9381 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pVCpu->iem.s.iEffSeg, false); 9382 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pVCpu->iem.s.iEffSeg, false); 9383 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pVCpu->iem.s.iEffSeg, false); 9384 9384 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 9385 9385 } … … 9711 9711 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9712 9712 9713 
IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9713 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9714 9714 IEM_MC_REF_EFLAGS(pEFlags); 9715 9715 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); … … 9738 9738 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); 9739 9739 9740 IEM_MC_MEM_MAP(pu8Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);9740 IEM_MC_MEM_MAP(pu8Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 9741 9741 IEM_MC_FETCH_EFLAGS(EFlags); 9742 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))9742 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 9743 9743 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags); 9744 9744 else … … 9761 9761 PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK]; 9762 9762 9763 switch (p IemCpu->enmEffOpSize)9763 switch (pVCpu->iem.s.enmEffOpSize) 9764 9764 { 9765 9765 case IEMMODE_16BIT: … … 9775 9775 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9776 9776 9777 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9777 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9778 9778 IEM_MC_REF_EFLAGS(pEFlags); 9779 9779 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 9802 9802 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 9803 9803 IEM_MC_ASSIGN(u16Src, u16Imm); 9804 IEM_MC_MEM_MAP(pu16Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);9804 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 9805 9805 IEM_MC_FETCH_EFLAGS(EFlags); 9806 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))9806 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 9807 9807 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 9808 9808 else … … 9829 9829 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9830 9830 9831 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9831 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9832 9832 IEM_MC_REF_EFLAGS(pEFlags); 9833 9833 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 9857 9857 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); 9858 9858 IEM_MC_ASSIGN(u32Src, u32Imm); 9859 IEM_MC_MEM_MAP(pu32Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);9859 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 9860 9860 IEM_MC_FETCH_EFLAGS(EFlags); 9861 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))9861 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 9862 9862 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 9863 9863 else … … 9884 9884 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9885 9885 9886 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9886 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9887 9887 IEM_MC_REF_EFLAGS(pEFlags); 9888 9888 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 9911 9911 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); 9912 9912 IEM_MC_ASSIGN(u64Src, u64Imm); 9913 IEM_MC_MEM_MAP(pu64Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);9913 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 9914 9914 IEM_MC_FETCH_EFLAGS(EFlags); 9915 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))9915 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 9916 9916 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 9917 9917 
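/* A hedged standalone sketch of the selection pattern visible above, where a LOCK prefix on a
   mapped memory destination routes the operation to an atomic worker instead of the plain
   pImpl->pfnNormalU* one (the else branches are elided in the hunks above). The AddU8Normal and
   AddU8Locked helpers and the use of C11 atomics are illustrative assumptions, not the VBox
   iemAImpl_* workers. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void AddU8Normal(uint8_t *pu8Dst, uint8_t u8Src)         { *pu8Dst = (uint8_t)(*pu8Dst + u8Src); }
static void AddU8Locked(_Atomic uint8_t *pu8Dst, uint8_t u8Src) { atomic_fetch_add(pu8Dst, u8Src); }

int main(void)
{
    uint8_t         memPlain  = 40;
    _Atomic uint8_t memShared = 40;
    int             fLock     = 1;          /* pretend the decoded instruction carried a LOCK prefix */

    if (!fLock)
        AddU8Normal(&memPlain, 2);          /* plain read-modify-write path */
    else
        AddU8Locked(&memShared, 2);         /* atomic path for the LOCK-prefixed form */

    printf("%u %u\n", (unsigned)memPlain, (unsigned)atomic_load(&memShared));
    return 0;
}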
else … … 9955 9955 IEMOP_HLP_NO_LOCK_PREFIX(); 9956 9956 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 9957 switch (p IemCpu->enmEffOpSize)9957 switch (pVCpu->iem.s.enmEffOpSize) 9958 9958 { 9959 9959 case IEMMODE_16BIT: … … 9964 9964 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9965 9965 9966 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9966 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9967 9967 IEM_MC_REF_EFLAGS(pEFlags); 9968 9968 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); … … 9980 9980 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9981 9981 9982 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9982 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 9983 9983 IEM_MC_REF_EFLAGS(pEFlags); 9984 9984 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); … … 9997 9997 IEM_MC_ARG(uint32_t *, pEFlags, 2); 9998 9998 9999 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);9999 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10000 10000 IEM_MC_REF_EFLAGS(pEFlags); 10001 10001 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); … … 10021 10021 } 10022 10022 10023 switch (p IemCpu->enmEffOpSize)10023 switch (pVCpu->iem.s.enmEffOpSize) 10024 10024 { 10025 10025 case IEMMODE_16BIT: … … 10034 10034 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 10035 10035 IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm); 10036 IEM_MC_MEM_MAP(pu16Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10036 IEM_MC_MEM_MAP(pu16Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10037 10037 IEM_MC_FETCH_EFLAGS(EFlags); 10038 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))10038 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 10039 10039 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 10040 10040 else … … 10059 10059 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 10060 10060 IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm); 10061 IEM_MC_MEM_MAP(pu32Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10061 IEM_MC_MEM_MAP(pu32Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10062 10062 IEM_MC_FETCH_EFLAGS(EFlags); 10063 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))10063 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 10064 10064 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 10065 10065 else … … 10084 10084 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 10085 10085 IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm); 10086 IEM_MC_MEM_MAP(pu64Dst, fAccess, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10086 IEM_MC_MEM_MAP(pu64Dst, fAccess, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10087 10087 IEM_MC_FETCH_EFLAGS(EFlags); 10088 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))10088 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 10089 10089 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 10090 10090 else … … 10140 10140 IEM_MC_LOCAL(uint8_t, uTmp2); 10141 10141 10142 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10143 IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10144 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uTmp1);10145 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, uTmp2);10142 IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10143 
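/* A small standalone demonstration of the (int8_t)u8Imm casts used above for the group-1 Ev,Ib
   forms, where the byte immediate is sign-extended to the effective operand size before the
   IEM_MC_ASSIGN. Plain illustrative C, not decoder code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  u8Imm  = 0xF0;                     /* -16 as a signed byte */
    uint16_t u16Src = (uint16_t)(int8_t)u8Imm;  /* 0xFFF0 */
    uint32_t u32Src = (uint32_t)(int8_t)u8Imm;  /* 0xFFFFFFF0 */
    uint64_t u64Src = (uint64_t)(int8_t)u8Imm;  /* 0xFFFFFFFFFFFFFFF0 */
    printf("%04x %08x %016llx\n", (unsigned)u16Src, (unsigned)u32Src, (unsigned long long)u64Src);
    return 0;
}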
IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10144 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uTmp1); 10145 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uTmp2); 10146 10146 10147 10147 IEM_MC_ADVANCE_RIP(); … … 10160 10160 10161 10161 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10162 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10163 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10162 IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10163 IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10164 10164 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg); 10165 10165 IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW); … … 10185 10185 IEMOP_HLP_NO_LOCK_PREFIX(); 10186 10186 10187 switch (p IemCpu->enmEffOpSize)10187 switch (pVCpu->iem.s.enmEffOpSize) 10188 10188 { 10189 10189 case IEMMODE_16BIT: … … 10192 10192 IEM_MC_LOCAL(uint16_t, uTmp2); 10193 10193 10194 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10195 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10196 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uTmp1);10197 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, uTmp2);10194 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10195 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10196 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uTmp1); 10197 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uTmp2); 10198 10198 10199 10199 IEM_MC_ADVANCE_RIP(); … … 10206 10206 IEM_MC_LOCAL(uint32_t, uTmp2); 10207 10207 10208 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10209 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10210 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uTmp1);10211 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, uTmp2);10208 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10209 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10210 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uTmp1); 10211 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uTmp2); 10212 10212 10213 10213 IEM_MC_ADVANCE_RIP(); … … 10220 10220 IEM_MC_LOCAL(uint64_t, uTmp2); 10221 10221 10222 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10223 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10224 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, uTmp1);10225 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, uTmp2);10222 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10223 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10224 IEM_MC_STORE_GREG_U64((bRm & 
X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, uTmp1); 10225 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uTmp2); 10226 10226 10227 10227 IEM_MC_ADVANCE_RIP(); … … 10237 10237 * We're accessing memory. 10238 10238 */ 10239 switch (p IemCpu->enmEffOpSize)10239 switch (pVCpu->iem.s.enmEffOpSize) 10240 10240 { 10241 10241 /** @todo the register must be committed separately! */ … … 10247 10247 10248 10248 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10249 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10250 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10249 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10250 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10251 10251 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg); 10252 10252 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW); … … 10263 10263 10264 10264 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10265 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10266 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10265 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10266 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10267 10267 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg); 10268 10268 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW); … … 10280 10280 10281 10281 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10282 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);10283 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10282 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 10283 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10284 10284 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg); 10285 10285 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW); … … 10311 10311 IEM_MC_BEGIN(0, 1); 10312 10312 IEM_MC_LOCAL(uint8_t, u8Value); 10313 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10314 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u8Value);10313 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10314 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u8Value); 10315 10315 IEM_MC_ADVANCE_RIP(); 10316 10316 IEM_MC_END(); … … 10325 10325 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10326 10326 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10327 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10328 IEM_MC_STORE_MEM_U8(p IemCpu->iEffSeg, GCPtrEffDst, u8Value);10327 IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10328 IEM_MC_STORE_MEM_U8(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u8Value); 10329 10329 IEM_MC_ADVANCE_RIP(); 10330 10330 IEM_MC_END(); … … 10348 10348 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 10349 10349 { 10350 switch (p IemCpu->enmEffOpSize)10350 switch (pVCpu->iem.s.enmEffOpSize) 
10351 10351 { 10352 10352 case IEMMODE_16BIT: 10353 10353 IEM_MC_BEGIN(0, 1); 10354 10354 IEM_MC_LOCAL(uint16_t, u16Value); 10355 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10356 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Value);10355 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10356 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Value); 10357 10357 IEM_MC_ADVANCE_RIP(); 10358 10358 IEM_MC_END(); … … 10362 10362 IEM_MC_BEGIN(0, 1); 10363 10363 IEM_MC_LOCAL(uint32_t, u32Value); 10364 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10365 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Value);10364 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10365 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Value); 10366 10366 IEM_MC_ADVANCE_RIP(); 10367 10367 IEM_MC_END(); … … 10371 10371 IEM_MC_BEGIN(0, 1); 10372 10372 IEM_MC_LOCAL(uint64_t, u64Value); 10373 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10374 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Value);10373 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10374 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Value); 10375 10375 IEM_MC_ADVANCE_RIP(); 10376 10376 IEM_MC_END(); … … 10383 10383 * We're writing a register to memory. 10384 10384 */ 10385 switch (p IemCpu->enmEffOpSize)10385 switch (pVCpu->iem.s.enmEffOpSize) 10386 10386 { 10387 10387 case IEMMODE_16BIT: … … 10390 10390 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10391 10391 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10392 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10393 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Value);10392 IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10393 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value); 10394 10394 IEM_MC_ADVANCE_RIP(); 10395 10395 IEM_MC_END(); … … 10401 10401 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10402 10402 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10403 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10404 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrEffDst, u32Value);10403 IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10404 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value); 10405 10405 IEM_MC_ADVANCE_RIP(); 10406 10406 IEM_MC_END(); … … 10412 10412 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10413 10413 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10414 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg);10415 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffDst, u64Value);10414 IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 10415 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u64Value); 10416 10416 IEM_MC_ADVANCE_RIP(); 10417 10417 IEM_MC_END(); … … 10438 10438 IEM_MC_BEGIN(0, 1); 10439 10439 IEM_MC_LOCAL(uint8_t, u8Value); 
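/* A rough sketch of where the enmEffOpSize value switched on above comes from in 64-bit mode:
   the default operand size is 32-bit, the 0x66 prefix selects 16-bit, and REX.W forces 64-bit
   and takes precedence over 0x66. The enum and function below are illustrative only, not the
   IEM decoder. */
#include <stdio.h>

typedef enum { OPSIZE_16BIT, OPSIZE_32BIT, OPSIZE_64BIT } OPSIZE;

static OPSIZE EffOpSizeLongMode(int fPrefix66, int fRexW)
{
    if (fRexW)     return OPSIZE_64BIT;   /* REX.W wins over the operand-size prefix */
    if (fPrefix66) return OPSIZE_16BIT;
    return OPSIZE_32BIT;
}

int main(void)
{
    printf("%d %d %d\n",
           EffOpSizeLongMode(0, 0),   /* 1: 32-bit default  */
           EffOpSizeLongMode(1, 0),   /* 0: 16-bit via 0x66 */
           EffOpSizeLongMode(1, 1));  /* 2: 64-bit via REX.W */
    return 0;
}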
10440 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10441 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u8Value);10440 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10441 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u8Value); 10442 10442 IEM_MC_ADVANCE_RIP(); 10443 10443 IEM_MC_END(); … … 10452 10452 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10453 10453 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10454 IEM_MC_FETCH_MEM_U8(u8Value, p IemCpu->iEffSeg, GCPtrEffDst);10455 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u8Value);10454 IEM_MC_FETCH_MEM_U8(u8Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 10455 IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u8Value); 10456 10456 IEM_MC_ADVANCE_RIP(); 10457 10457 IEM_MC_END(); … … 10474 10474 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 10475 10475 { 10476 switch (p IemCpu->enmEffOpSize)10476 switch (pVCpu->iem.s.enmEffOpSize) 10477 10477 { 10478 10478 case IEMMODE_16BIT: 10479 10479 IEM_MC_BEGIN(0, 1); 10480 10480 IEM_MC_LOCAL(uint16_t, u16Value); 10481 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10482 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);10481 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10482 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 10483 10483 IEM_MC_ADVANCE_RIP(); 10484 10484 IEM_MC_END(); … … 10488 10488 IEM_MC_BEGIN(0, 1); 10489 10489 IEM_MC_LOCAL(uint32_t, u32Value); 10490 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10491 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);10490 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10491 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 10492 10492 IEM_MC_ADVANCE_RIP(); 10493 10493 IEM_MC_END(); … … 10497 10497 IEM_MC_BEGIN(0, 1); 10498 10498 IEM_MC_LOCAL(uint64_t, u64Value); 10499 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10500 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);10499 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10500 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 10501 10501 IEM_MC_ADVANCE_RIP(); 10502 10502 IEM_MC_END(); … … 10509 10509 * We're loading a register from memory. 
10510 10510 */ 10511 switch (p IemCpu->enmEffOpSize)10511 switch (pVCpu->iem.s.enmEffOpSize) 10512 10512 { 10513 10513 case IEMMODE_16BIT: … … 10516 10516 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10517 10517 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10518 IEM_MC_FETCH_MEM_U16(u16Value, p IemCpu->iEffSeg, GCPtrEffDst);10519 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Value);10518 IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 10519 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value); 10520 10520 IEM_MC_ADVANCE_RIP(); 10521 10521 IEM_MC_END(); … … 10527 10527 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10528 10528 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10529 IEM_MC_FETCH_MEM_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);10530 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Value);10529 IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 10530 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value); 10531 10531 IEM_MC_ADVANCE_RIP(); 10532 10532 IEM_MC_END(); … … 10538 10538 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10539 10539 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10540 IEM_MC_FETCH_MEM_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);10541 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u64Value);10540 IEM_MC_FETCH_MEM_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 10541 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value); 10542 10542 IEM_MC_ADVANCE_RIP(); 10543 10543 IEM_MC_END(); … … 10552 10552 FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev) 10553 10553 { 10554 if (p IemCpu->enmCpuMode != IEMMODE_64BIT)10554 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) 10555 10555 return FNIEMOP_CALL(iemOp_arpl_Ew_Gw); 10556 if (p IemCpu->enmEffOpSize != IEMMODE_64BIT)10556 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_64BIT) 10557 10557 return FNIEMOP_CALL(iemOp_mov_Gv_Ev); 10558 10558 return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev); … … 10582 10582 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 10583 10583 { 10584 switch (p IemCpu->enmEffOpSize)10584 switch (pVCpu->iem.s.enmEffOpSize) 10585 10585 { 10586 10586 case IEMMODE_16BIT: … … 10588 10588 IEM_MC_LOCAL(uint16_t, u16Value); 10589 10589 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg); 10590 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Value);10590 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Value); 10591 10591 IEM_MC_ADVANCE_RIP(); 10592 10592 IEM_MC_END(); … … 10597 10597 IEM_MC_LOCAL(uint32_t, u32Value); 10598 10598 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg); 10599 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Value);10599 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Value); 10600 10600 IEM_MC_ADVANCE_RIP(); 10601 10601 IEM_MC_END(); … … 10606 10606 IEM_MC_LOCAL(uint64_t, u64Value); 10607 10607 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg); 10608 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Value);10608 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Value); 10609 10609 IEM_MC_ADVANCE_RIP(); 10610 10610 IEM_MC_END(); … … 10619 10619 */ 10620 10620 #if 0 /* not necessary */ 10621 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;10621 
pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_16BIT; 10622 10622 #endif 10623 10623 IEM_MC_BEGIN(0, 2); … … 10626 10626 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10627 10627 IEM_MC_FETCH_SREG_U16(u16Value, iSegReg); 10628 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Value);10628 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value); 10629 10629 IEM_MC_ADVANCE_RIP(); 10630 10630 IEM_MC_END(); … … 10645 10645 return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */ 10646 10646 10647 switch (p IemCpu->enmEffOpSize)10647 switch (pVCpu->iem.s.enmEffOpSize) 10648 10648 { 10649 10649 case IEMMODE_16BIT: … … 10653 10653 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 10654 10654 IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc); 10655 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u16Cast);10655 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Cast); 10656 10656 IEM_MC_ADVANCE_RIP(); 10657 10657 IEM_MC_END(); … … 10664 10664 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 10665 10665 IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc); 10666 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, u32Cast);10666 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Cast); 10667 10667 IEM_MC_ADVANCE_RIP(); 10668 10668 IEM_MC_END(); … … 10673 10673 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 10674 10674 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 10675 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | p IemCpu->uRexReg, GCPtrEffSrc);10675 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, GCPtrEffSrc); 10676 10676 IEM_MC_ADVANCE_RIP(); 10677 10677 IEM_MC_END(); … … 10694 10694 */ 10695 10695 #if 0 /* not necessary */ 10696 p IemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;10696 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_16BIT; 10697 10697 #endif 10698 10698 … … 10714 10714 IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0); 10715 10715 IEM_MC_ARG(uint16_t, u16Value, 1); 10716 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10716 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10717 10717 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value); 10718 10718 IEM_MC_END(); … … 10729 10729 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 10730 10730 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 10731 IEM_MC_FETCH_MEM_U16(u16Value, p IemCpu->iEffSeg, GCPtrEffDst);10731 IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 10732 10732 IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value); 10733 10733 IEM_MC_END(); … … 10755 10755 /* Register access is relatively easy and can share code. */ 10756 10756 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 10757 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);10757 return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 10758 10758 10759 10759 /* … … 10772 10772 #ifndef TST_IEM_CHECK_MC 10773 10773 /* Calc effective address with modified ESP. 
*/ 10774 uint8_t const offOpcodeSaved = p IemCpu->offOpcode;10774 uint8_t const offOpcodeSaved = pVCpu->iem.s.offOpcode; 10775 10775 RTGCPTR GCPtrEff; 10776 10776 VBOXSTRICTRC rcStrict; 10777 rcStrict = iemOpHlpCalcRmEffAddr(p IemCpu, bRm, 0, &GCPtrEff);10777 rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); 10778 10778 if (rcStrict != VINF_SUCCESS) 10779 10779 return rcStrict; 10780 p IemCpu->offOpcode = offOpcodeSaved;10781 10782 PCPUMCTX pCtx = p IemCpu->CTX_SUFF(pCtx);10780 pVCpu->iem.s.offOpcode = offOpcodeSaved; 10781 10782 PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx); 10783 10783 uint64_t const RspSaved = pCtx->rsp; 10784 switch (p IemCpu->enmEffOpSize)10785 { 10786 case IEMMODE_16BIT: iemRegAddToRsp(p IemCpu, pCtx, 2); break;10787 case IEMMODE_32BIT: iemRegAddToRsp(p IemCpu, pCtx, 4); break;10788 case IEMMODE_64BIT: iemRegAddToRsp(p IemCpu, pCtx, 8); break;10784 switch (pVCpu->iem.s.enmEffOpSize) 10785 { 10786 case IEMMODE_16BIT: iemRegAddToRsp(pVCpu, pCtx, 2); break; 10787 case IEMMODE_32BIT: iemRegAddToRsp(pVCpu, pCtx, 4); break; 10788 case IEMMODE_64BIT: iemRegAddToRsp(pVCpu, pCtx, 8); break; 10789 10789 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 10790 10790 } 10791 rcStrict = iemOpHlpCalcRmEffAddr(p IemCpu, bRm, 0, &GCPtrEff);10791 rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); 10792 10792 Assert(rcStrict == VINF_SUCCESS); 10793 10793 pCtx->rsp = RspSaved; … … 10796 10796 RTUINT64U TmpRsp; 10797 10797 TmpRsp.u = pCtx->rsp; 10798 switch (p IemCpu->enmEffOpSize)10798 switch (pVCpu->iem.s.enmEffOpSize) 10799 10799 { 10800 10800 case IEMMODE_16BIT: 10801 10801 { 10802 10802 uint16_t u16Value; 10803 rcStrict = iemMemStackPopU16Ex(p IemCpu, &u16Value, &TmpRsp);10803 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp); 10804 10804 if (rcStrict == VINF_SUCCESS) 10805 rcStrict = iemMemStoreDataU16(p IemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);10805 rcStrict = iemMemStoreDataU16(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEff, u16Value); 10806 10806 break; 10807 10807 } … … 10810 10810 { 10811 10811 uint32_t u32Value; 10812 rcStrict = iemMemStackPopU32Ex(p IemCpu, &u32Value, &TmpRsp);10812 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp); 10813 10813 if (rcStrict == VINF_SUCCESS) 10814 rcStrict = iemMemStoreDataU32(p IemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);10814 rcStrict = iemMemStoreDataU32(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEff, u32Value); 10815 10815 break; 10816 10816 } … … 10819 10819 { 10820 10820 uint64_t u64Value; 10821 rcStrict = iemMemStackPopU64Ex(p IemCpu, &u64Value, &TmpRsp);10821 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp); 10822 10822 if (rcStrict == VINF_SUCCESS) 10823 rcStrict = iemMemStoreDataU64(p IemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);10823 rcStrict = iemMemStoreDataU64(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEff, u64Value); 10824 10824 break; 10825 10825 } … … 10830 10830 { 10831 10831 pCtx->rsp = TmpRsp.u; 10832 iemRegUpdateRipAndClearRF(p IemCpu);10832 iemRegUpdateRipAndClearRF(pVCpu); 10833 10833 } 10834 10834 return rcStrict; … … 10861 10861 IEMOP_HLP_NO_LOCK_PREFIX(); 10862 10862 10863 iReg |= p IemCpu->uRexB;10864 switch (p IemCpu->enmEffOpSize)10863 iReg |= pVCpu->iem.s.uRexB; 10864 switch (pVCpu->iem.s.enmEffOpSize) 10865 10865 { 10866 10866 case IEMMODE_16BIT: … … 10909 10909 { 10910 10910 /* R8/R8D and RAX/EAX can be exchanged. 
*/ 10911 if (p IemCpu->fPrefixes & IEM_OP_PRF_REX_B)10911 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX_B) 10912 10912 { 10913 10913 IEMOP_MNEMONIC("xchg r8,rAX"); … … 10915 10915 } 10916 10916 10917 if (p IemCpu->fPrefixes & IEM_OP_PRF_LOCK)10917 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK) 10918 10918 IEMOP_MNEMONIC("pause"); 10919 10919 else … … 10986 10986 { 10987 10987 IEMOP_HLP_NO_LOCK_PREFIX(); 10988 switch (p IemCpu->enmEffOpSize)10988 switch (pVCpu->iem.s.enmEffOpSize) 10989 10989 { 10990 10990 case IEMMODE_16BIT: … … 11033 11033 { 11034 11034 IEMOP_HLP_NO_LOCK_PREFIX(); 11035 switch (p IemCpu->enmEffOpSize)11035 switch (pVCpu->iem.s.enmEffOpSize) 11036 11036 { 11037 11037 case IEMMODE_16BIT: … … 11084 11084 /* Decode the far pointer address and pass it on to the far call C implementation. */ 11085 11085 uint32_t offSeg; 11086 if (p IemCpu->enmEffOpSize != IEMMODE_16BIT)11086 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT) 11087 11087 IEM_OPCODE_GET_NEXT_U32(&offSeg); 11088 11088 else … … 11090 11090 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel); 11091 11091 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 11092 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, p IemCpu->enmEffOpSize);11092 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pVCpu->iem.s.enmEffOpSize); 11093 11093 } 11094 11094 … … 11114 11114 IEMOP_HLP_NO_LOCK_PREFIX(); 11115 11115 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 11116 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, p IemCpu->enmEffOpSize);11116 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pVCpu->iem.s.enmEffOpSize); 11117 11117 } 11118 11118 … … 11123 11123 IEMOP_HLP_NO_LOCK_PREFIX(); 11124 11124 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 11125 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, p IemCpu->enmEffOpSize);11125 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pVCpu->iem.s.enmEffOpSize); 11126 11126 } 11127 11127 … … 11132 11132 IEMOP_MNEMONIC("sahf"); 11133 11133 IEMOP_HLP_NO_LOCK_PREFIX(); 11134 if ( p IemCpu->enmCpuMode == IEMMODE_64BIT11135 && !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fLahfSahf)11134 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT 11135 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLahfSahf) 11136 11136 return IEMOP_RAISE_INVALID_OPCODE(); 11137 11137 IEM_MC_BEGIN(0, 2); … … 11156 11156 IEMOP_MNEMONIC("lahf"); 11157 11157 IEMOP_HLP_NO_LOCK_PREFIX(); 11158 if ( p IemCpu->enmCpuMode == IEMMODE_64BIT11159 && !IEM_GET_GUEST_CPU_FEATURES(p IemCpu)->fLahfSahf)11158 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT 11159 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLahfSahf) 11160 11160 return IEMOP_RAISE_INVALID_OPCODE(); 11161 11161 IEM_MC_BEGIN(0, 1); … … 11178 11178 do \ 11179 11179 { \ 11180 switch (p IemCpu->enmEffAddrMode) \11180 switch (pVCpu->iem.s.enmEffAddrMode) \ 11181 11181 { \ 11182 11182 case IEMMODE_16BIT: \ … … 11208 11208 IEM_MC_BEGIN(0,1); 11209 11209 IEM_MC_LOCAL(uint8_t, u8Tmp); 11210 IEM_MC_FETCH_MEM_U8(u8Tmp, p IemCpu->iEffSeg, GCPtrMemOff);11210 IEM_MC_FETCH_MEM_U8(u8Tmp, pVCpu->iem.s.iEffSeg, GCPtrMemOff); 11211 11211 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp); 11212 11212 IEM_MC_ADVANCE_RIP(); … … 11229 11229 * Fetch rAX. 
11230 11230 */ 11231 switch (p IemCpu->enmEffOpSize)11231 switch (pVCpu->iem.s.enmEffOpSize) 11232 11232 { 11233 11233 case IEMMODE_16BIT: 11234 11234 IEM_MC_BEGIN(0,1); 11235 11235 IEM_MC_LOCAL(uint16_t, u16Tmp); 11236 IEM_MC_FETCH_MEM_U16(u16Tmp, p IemCpu->iEffSeg, GCPtrMemOff);11236 IEM_MC_FETCH_MEM_U16(u16Tmp, pVCpu->iem.s.iEffSeg, GCPtrMemOff); 11237 11237 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp); 11238 11238 IEM_MC_ADVANCE_RIP(); … … 11243 11243 IEM_MC_BEGIN(0,1); 11244 11244 IEM_MC_LOCAL(uint32_t, u32Tmp); 11245 IEM_MC_FETCH_MEM_U32(u32Tmp, p IemCpu->iEffSeg, GCPtrMemOff);11245 IEM_MC_FETCH_MEM_U32(u32Tmp, pVCpu->iem.s.iEffSeg, GCPtrMemOff); 11246 11246 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp); 11247 11247 IEM_MC_ADVANCE_RIP(); … … 11252 11252 IEM_MC_BEGIN(0,1); 11253 11253 IEM_MC_LOCAL(uint64_t, u64Tmp); 11254 IEM_MC_FETCH_MEM_U64(u64Tmp, p IemCpu->iEffSeg, GCPtrMemOff);11254 IEM_MC_FETCH_MEM_U64(u64Tmp, pVCpu->iem.s.iEffSeg, GCPtrMemOff); 11255 11255 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp); 11256 11256 IEM_MC_ADVANCE_RIP(); … … 11278 11278 IEM_MC_LOCAL(uint8_t, u8Tmp); 11279 11279 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX); 11280 IEM_MC_STORE_MEM_U8(p IemCpu->iEffSeg, GCPtrMemOff, u8Tmp);11280 IEM_MC_STORE_MEM_U8(pVCpu->iem.s.iEffSeg, GCPtrMemOff, u8Tmp); 11281 11281 IEM_MC_ADVANCE_RIP(); 11282 11282 IEM_MC_END(); … … 11297 11297 * Store rAX. 11298 11298 */ 11299 switch (p IemCpu->enmEffOpSize)11299 switch (pVCpu->iem.s.enmEffOpSize) 11300 11300 { 11301 11301 case IEMMODE_16BIT: … … 11303 11303 IEM_MC_LOCAL(uint16_t, u16Tmp); 11304 11304 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX); 11305 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrMemOff, u16Tmp);11305 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrMemOff, u16Tmp); 11306 11306 IEM_MC_ADVANCE_RIP(); 11307 11307 IEM_MC_END(); … … 11312 11312 IEM_MC_LOCAL(uint32_t, u32Tmp); 11313 11313 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX); 11314 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrMemOff, u32Tmp);11314 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrMemOff, u32Tmp); 11315 11315 IEM_MC_ADVANCE_RIP(); 11316 11316 IEM_MC_END(); … … 11321 11321 IEM_MC_LOCAL(uint64_t, u64Tmp); 11322 11322 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX); 11323 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrMemOff, u64Tmp);11323 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrMemOff, u64Tmp); 11324 11324 IEM_MC_ADVANCE_RIP(); 11325 11325 IEM_MC_END(); … … 11336 11336 IEM_MC_LOCAL(RTGCPTR, uAddr); \ 11337 11337 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \ 11338 IEM_MC_FETCH_MEM_U##ValBits(uValue, p IemCpu->iEffSeg, uAddr); \11338 IEM_MC_FETCH_MEM_U##ValBits(uValue, pVCpu->iem.s.iEffSeg, uAddr); \ 11339 11339 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \ 11340 11340 IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \ … … 11357 11357 * Use the C implementation if a repeat prefix is encountered. 
11358 11358 */ 11359 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11359 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11360 11360 { 11361 11361 IEMOP_MNEMONIC("rep movsb Xb,Yb"); 11362 switch (p IemCpu->enmEffAddrMode)11362 switch (pVCpu->iem.s.enmEffAddrMode) 11363 11363 { 11364 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, p IemCpu->iEffSeg);11365 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, p IemCpu->iEffSeg);11366 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, p IemCpu->iEffSeg);11364 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pVCpu->iem.s.iEffSeg); 11365 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pVCpu->iem.s.iEffSeg); 11366 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pVCpu->iem.s.iEffSeg); 11367 11367 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11368 11368 } … … 11373 11373 * Sharing case implementation with movs[wdq] below. 11374 11374 */ 11375 switch (p IemCpu->enmEffAddrMode)11375 switch (pVCpu->iem.s.enmEffAddrMode) 11376 11376 { 11377 11377 case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break; … … 11392 11392 * Use the C implementation if a repeat prefix is encountered. 11393 11393 */ 11394 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11394 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11395 11395 { 11396 11396 IEMOP_MNEMONIC("rep movs Xv,Yv"); 11397 switch (p IemCpu->enmEffOpSize)11397 switch (pVCpu->iem.s.enmEffOpSize) 11398 11398 { 11399 11399 case IEMMODE_16BIT: 11400 switch (p IemCpu->enmEffAddrMode)11400 switch (pVCpu->iem.s.enmEffAddrMode) 11401 11401 { 11402 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, p IemCpu->iEffSeg);11403 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, p IemCpu->iEffSeg);11404 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, p IemCpu->iEffSeg);11402 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pVCpu->iem.s.iEffSeg); 11403 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pVCpu->iem.s.iEffSeg); 11404 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pVCpu->iem.s.iEffSeg); 11405 11405 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11406 11406 } 11407 11407 break; 11408 11408 case IEMMODE_32BIT: 11409 switch (p IemCpu->enmEffAddrMode)11409 switch (pVCpu->iem.s.enmEffAddrMode) 11410 11410 { 11411 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, p IemCpu->iEffSeg);11412 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, p IemCpu->iEffSeg);11413 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, p IemCpu->iEffSeg);11411 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pVCpu->iem.s.iEffSeg); 11412 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pVCpu->iem.s.iEffSeg); 11413 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pVCpu->iem.s.iEffSeg); 11414 11414 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11415 11415 } 11416 11416 case IEMMODE_64BIT: 11417 switch (p IemCpu->enmEffAddrMode)11417 switch (pVCpu->iem.s.enmEffAddrMode) 11418 11418 { 11419 11419 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_6); 
11420 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, p IemCpu->iEffSeg);11421 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, p IemCpu->iEffSeg);11420 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pVCpu->iem.s.iEffSeg); 11421 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pVCpu->iem.s.iEffSeg); 11422 11422 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11423 11423 } … … 11431 11431 * Using ugly macro for implementing the cases, sharing it with movsb. 11432 11432 */ 11433 switch (p IemCpu->enmEffOpSize)11433 switch (pVCpu->iem.s.enmEffOpSize) 11434 11434 { 11435 11435 case IEMMODE_16BIT: 11436 switch (p IemCpu->enmEffAddrMode)11436 switch (pVCpu->iem.s.enmEffAddrMode) 11437 11437 { 11438 11438 case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break; … … 11444 11444 11445 11445 case IEMMODE_32BIT: 11446 switch (p IemCpu->enmEffAddrMode)11446 switch (pVCpu->iem.s.enmEffAddrMode) 11447 11447 { 11448 11448 case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break; … … 11454 11454 11455 11455 case IEMMODE_64BIT: 11456 switch (p IemCpu->enmEffAddrMode)11456 switch (pVCpu->iem.s.enmEffAddrMode) 11457 11457 { 11458 11458 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break; … … 11479 11479 \ 11480 11480 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \ 11481 IEM_MC_FETCH_MEM_U##ValBits(uValue1, p IemCpu->iEffSeg, uAddr); \11481 IEM_MC_FETCH_MEM_U##ValBits(uValue1, pVCpu->iem.s.iEffSeg, uAddr); \ 11482 11482 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \ 11483 11483 IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \ … … 11504 11504 * Use the C implementation if a repeat prefix is encountered. 
11505 11505 */ 11506 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPZ)11506 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPZ) 11507 11507 { 11508 11508 IEMOP_MNEMONIC("repe cmps Xb,Yb"); 11509 switch (p IemCpu->enmEffAddrMode)11509 switch (pVCpu->iem.s.enmEffAddrMode) 11510 11510 { 11511 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, p IemCpu->iEffSeg);11512 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, p IemCpu->iEffSeg);11513 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, p IemCpu->iEffSeg);11511 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pVCpu->iem.s.iEffSeg); 11512 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pVCpu->iem.s.iEffSeg); 11513 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pVCpu->iem.s.iEffSeg); 11514 11514 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11515 11515 } 11516 11516 } 11517 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPNZ)11517 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPNZ) 11518 11518 { 11519 11519 IEMOP_MNEMONIC("repe cmps Xb,Yb"); 11520 switch (p IemCpu->enmEffAddrMode)11520 switch (pVCpu->iem.s.enmEffAddrMode) 11521 11521 { 11522 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, p IemCpu->iEffSeg);11523 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, p IemCpu->iEffSeg);11524 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, p IemCpu->iEffSeg);11522 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pVCpu->iem.s.iEffSeg); 11523 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pVCpu->iem.s.iEffSeg); 11524 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pVCpu->iem.s.iEffSeg); 11525 11525 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11526 11526 } … … 11531 11531 * Sharing case implementation with cmps[wdq] below. 11532 11532 */ 11533 switch (p IemCpu->enmEffAddrMode)11533 switch (pVCpu->iem.s.enmEffAddrMode) 11534 11534 { 11535 11535 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break; … … 11551 11551 * Use the C implementation if a repeat prefix is encountered. 
11552 11552 */ 11553 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPZ)11553 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPZ) 11554 11554 { 11555 11555 IEMOP_MNEMONIC("repe cmps Xv,Yv"); 11556 switch (p IemCpu->enmEffOpSize)11556 switch (pVCpu->iem.s.enmEffOpSize) 11557 11557 { 11558 11558 case IEMMODE_16BIT: 11559 switch (p IemCpu->enmEffAddrMode)11559 switch (pVCpu->iem.s.enmEffAddrMode) 11560 11560 { 11561 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, p IemCpu->iEffSeg);11562 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, p IemCpu->iEffSeg);11563 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, p IemCpu->iEffSeg);11561 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pVCpu->iem.s.iEffSeg); 11562 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pVCpu->iem.s.iEffSeg); 11563 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pVCpu->iem.s.iEffSeg); 11564 11564 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11565 11565 } 11566 11566 break; 11567 11567 case IEMMODE_32BIT: 11568 switch (p IemCpu->enmEffAddrMode)11568 switch (pVCpu->iem.s.enmEffAddrMode) 11569 11569 { 11570 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, p IemCpu->iEffSeg);11571 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, p IemCpu->iEffSeg);11572 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, p IemCpu->iEffSeg);11570 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pVCpu->iem.s.iEffSeg); 11571 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pVCpu->iem.s.iEffSeg); 11572 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pVCpu->iem.s.iEffSeg); 11573 11573 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11574 11574 } 11575 11575 case IEMMODE_64BIT: 11576 switch (p IemCpu->enmEffAddrMode)11576 switch (pVCpu->iem.s.enmEffAddrMode) 11577 11577 { 11578 11578 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_4); 11579 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, p IemCpu->iEffSeg);11580 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, p IemCpu->iEffSeg);11579 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pVCpu->iem.s.iEffSeg); 11580 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pVCpu->iem.s.iEffSeg); 11581 11581 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11582 11582 } … … 11585 11585 } 11586 11586 11587 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPNZ)11587 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPNZ) 11588 11588 { 11589 11589 IEMOP_MNEMONIC("repne cmps Xv,Yv"); 11590 switch (p IemCpu->enmEffOpSize)11590 switch (pVCpu->iem.s.enmEffOpSize) 11591 11591 { 11592 11592 case IEMMODE_16BIT: 11593 switch (p IemCpu->enmEffAddrMode)11593 switch (pVCpu->iem.s.enmEffAddrMode) 11594 11594 { 11595 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, p IemCpu->iEffSeg);11596 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, p IemCpu->iEffSeg);11597 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, p IemCpu->iEffSeg);11595 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pVCpu->iem.s.iEffSeg); 11596 case 
IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pVCpu->iem.s.iEffSeg); 11597 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pVCpu->iem.s.iEffSeg); 11598 11598 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11599 11599 } 11600 11600 break; 11601 11601 case IEMMODE_32BIT: 11602 switch (p IemCpu->enmEffAddrMode)11602 switch (pVCpu->iem.s.enmEffAddrMode) 11603 11603 { 11604 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, p IemCpu->iEffSeg);11605 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, p IemCpu->iEffSeg);11606 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, p IemCpu->iEffSeg);11604 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pVCpu->iem.s.iEffSeg); 11605 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pVCpu->iem.s.iEffSeg); 11606 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pVCpu->iem.s.iEffSeg); 11607 11607 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11608 11608 } 11609 11609 case IEMMODE_64BIT: 11610 switch (p IemCpu->enmEffAddrMode)11610 switch (pVCpu->iem.s.enmEffAddrMode) 11611 11611 { 11612 11612 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_2); 11613 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, p IemCpu->iEffSeg);11614 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, p IemCpu->iEffSeg);11613 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pVCpu->iem.s.iEffSeg); 11614 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pVCpu->iem.s.iEffSeg); 11615 11615 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11616 11616 } … … 11625 11625 * Using ugly macro for implementing the cases, sharing it with cmpsb. 11626 11626 */ 11627 switch (p IemCpu->enmEffOpSize)11627 switch (pVCpu->iem.s.enmEffOpSize) 11628 11628 { 11629 11629 case IEMMODE_16BIT: 11630 switch (p IemCpu->enmEffAddrMode)11630 switch (pVCpu->iem.s.enmEffAddrMode) 11631 11631 { 11632 11632 case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break; … … 11638 11638 11639 11639 case IEMMODE_32BIT: 11640 switch (p IemCpu->enmEffAddrMode)11640 switch (pVCpu->iem.s.enmEffAddrMode) 11641 11641 { 11642 11642 case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break; … … 11648 11648 11649 11649 case IEMMODE_64BIT: 11650 switch (p IemCpu->enmEffAddrMode)11650 switch (pVCpu->iem.s.enmEffAddrMode) 11651 11651 { 11652 11652 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break; … … 11706 11706 * Use the C implementation if a repeat prefix is encountered. 11707 11707 */ 11708 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11708 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11709 11709 { 11710 11710 IEMOP_MNEMONIC("rep stos Yb,al"); 11711 switch (p IemCpu->enmEffAddrMode)11711 switch (pVCpu->iem.s.enmEffAddrMode) 11712 11712 { 11713 11713 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16); … … 11722 11722 * Sharing case implementation with stos[wdq] below. 11723 11723 */ 11724 switch (p IemCpu->enmEffAddrMode)11724 switch (pVCpu->iem.s.enmEffAddrMode) 11725 11725 { 11726 11726 case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break; … … 11741 11741 * Use the C implementation if a repeat prefix is encountered. 
11742 11742 */ 11743 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11743 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11744 11744 { 11745 11745 IEMOP_MNEMONIC("rep stos Yv,rAX"); 11746 switch (p IemCpu->enmEffOpSize)11746 switch (pVCpu->iem.s.enmEffOpSize) 11747 11747 { 11748 11748 case IEMMODE_16BIT: 11749 switch (p IemCpu->enmEffAddrMode)11749 switch (pVCpu->iem.s.enmEffAddrMode) 11750 11750 { 11751 11751 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16); … … 11756 11756 break; 11757 11757 case IEMMODE_32BIT: 11758 switch (p IemCpu->enmEffAddrMode)11758 switch (pVCpu->iem.s.enmEffAddrMode) 11759 11759 { 11760 11760 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16); … … 11764 11764 } 11765 11765 case IEMMODE_64BIT: 11766 switch (p IemCpu->enmEffAddrMode)11766 switch (pVCpu->iem.s.enmEffAddrMode) 11767 11767 { 11768 11768 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_9); … … 11780 11780 * Using ugly macro for implementing the cases, sharing it with stosb. 11781 11781 */ 11782 switch (p IemCpu->enmEffOpSize)11782 switch (pVCpu->iem.s.enmEffOpSize) 11783 11783 { 11784 11784 case IEMMODE_16BIT: 11785 switch (p IemCpu->enmEffAddrMode)11785 switch (pVCpu->iem.s.enmEffAddrMode) 11786 11786 { 11787 11787 case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break; … … 11793 11793 11794 11794 case IEMMODE_32BIT: 11795 switch (p IemCpu->enmEffAddrMode)11795 switch (pVCpu->iem.s.enmEffAddrMode) 11796 11796 { 11797 11797 case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break; … … 11803 11803 11804 11804 case IEMMODE_64BIT: 11805 switch (p IemCpu->enmEffAddrMode)11805 switch (pVCpu->iem.s.enmEffAddrMode) 11806 11806 { 11807 11807 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break; … … 11824 11824 IEM_MC_LOCAL(RTGCPTR, uAddr); \ 11825 11825 IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \ 11826 IEM_MC_FETCH_MEM_U##ValBits(uValue, p IemCpu->iEffSeg, uAddr); \11826 IEM_MC_FETCH_MEM_U##ValBits(uValue, pVCpu->iem.s.iEffSeg, uAddr); \ 11827 11827 IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \ 11828 11828 IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \ … … 11842 11842 * Use the C implementation if a repeat prefix is encountered. 11843 11843 */ 11844 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11844 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11845 11845 { 11846 11846 IEMOP_MNEMONIC("rep lodsb al,Xb"); 11847 switch (p IemCpu->enmEffAddrMode)11847 switch (pVCpu->iem.s.enmEffAddrMode) 11848 11848 { 11849 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, p IemCpu->iEffSeg);11850 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, p IemCpu->iEffSeg);11851 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, p IemCpu->iEffSeg);11849 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pVCpu->iem.s.iEffSeg); 11850 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pVCpu->iem.s.iEffSeg); 11851 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pVCpu->iem.s.iEffSeg); 11852 11852 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11853 11853 } … … 11858 11858 * Sharing case implementation with stos[wdq] below. 
11859 11859 */ 11860 switch (p IemCpu->enmEffAddrMode)11860 switch (pVCpu->iem.s.enmEffAddrMode) 11861 11861 { 11862 11862 case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break; … … 11877 11877 * Use the C implementation if a repeat prefix is encountered. 11878 11878 */ 11879 if (p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))11879 if (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 11880 11880 { 11881 11881 IEMOP_MNEMONIC("rep lods rAX,Xv"); 11882 switch (p IemCpu->enmEffOpSize)11882 switch (pVCpu->iem.s.enmEffOpSize) 11883 11883 { 11884 11884 case IEMMODE_16BIT: 11885 switch (p IemCpu->enmEffAddrMode)11885 switch (pVCpu->iem.s.enmEffAddrMode) 11886 11886 { 11887 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, p IemCpu->iEffSeg);11888 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, p IemCpu->iEffSeg);11889 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, p IemCpu->iEffSeg);11887 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pVCpu->iem.s.iEffSeg); 11888 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pVCpu->iem.s.iEffSeg); 11889 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pVCpu->iem.s.iEffSeg); 11890 11890 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11891 11891 } 11892 11892 break; 11893 11893 case IEMMODE_32BIT: 11894 switch (p IemCpu->enmEffAddrMode)11894 switch (pVCpu->iem.s.enmEffAddrMode) 11895 11895 { 11896 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, p IemCpu->iEffSeg);11897 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, p IemCpu->iEffSeg);11898 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, p IemCpu->iEffSeg);11896 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pVCpu->iem.s.iEffSeg); 11897 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pVCpu->iem.s.iEffSeg); 11898 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pVCpu->iem.s.iEffSeg); 11899 11899 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11900 11900 } 11901 11901 case IEMMODE_64BIT: 11902 switch (p IemCpu->enmEffAddrMode)11902 switch (pVCpu->iem.s.enmEffAddrMode) 11903 11903 { 11904 11904 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_7); 11905 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, p IemCpu->iEffSeg);11906 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, p IemCpu->iEffSeg);11905 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pVCpu->iem.s.iEffSeg); 11906 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pVCpu->iem.s.iEffSeg); 11907 11907 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 11908 11908 } … … 11916 11916 * Using ugly macro for implementing the cases, sharing it with lodsb. 
11917 11917 */ 11918 switch (p IemCpu->enmEffOpSize)11918 switch (pVCpu->iem.s.enmEffOpSize) 11919 11919 { 11920 11920 case IEMMODE_16BIT: 11921 switch (p IemCpu->enmEffAddrMode)11921 switch (pVCpu->iem.s.enmEffAddrMode) 11922 11922 { 11923 11923 case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break; … … 11929 11929 11930 11930 case IEMMODE_32BIT: 11931 switch (p IemCpu->enmEffAddrMode)11931 switch (pVCpu->iem.s.enmEffAddrMode) 11932 11932 { 11933 11933 case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break; … … 11939 11939 11940 11940 case IEMMODE_64BIT: 11941 switch (p IemCpu->enmEffAddrMode)11941 switch (pVCpu->iem.s.enmEffAddrMode) 11942 11942 { 11943 11943 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break; … … 11984 11984 * Use the C implementation if a repeat prefix is encountered. 11985 11985 */ 11986 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPZ)11986 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPZ) 11987 11987 { 11988 11988 IEMOP_MNEMONIC("repe scasb al,Xb"); 11989 switch (p IemCpu->enmEffAddrMode)11989 switch (pVCpu->iem.s.enmEffAddrMode) 11990 11990 { 11991 11991 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16); … … 11995 11995 } 11996 11996 } 11997 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPNZ)11997 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPNZ) 11998 11998 { 11999 11999 IEMOP_MNEMONIC("repne scasb al,Xb"); 12000 switch (p IemCpu->enmEffAddrMode)12000 switch (pVCpu->iem.s.enmEffAddrMode) 12001 12001 { 12002 12002 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16); … … 12011 12011 * Sharing case implementation with stos[wdq] below. 12012 12012 */ 12013 switch (p IemCpu->enmEffAddrMode)12013 switch (pVCpu->iem.s.enmEffAddrMode) 12014 12014 { 12015 12015 case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break; … … 12030 12030 * Use the C implementation if a repeat prefix is encountered. 12031 12031 */ 12032 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPZ)12032 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPZ) 12033 12033 { 12034 12034 IEMOP_MNEMONIC("repe scas rAX,Xv"); 12035 switch (p IemCpu->enmEffOpSize)12035 switch (pVCpu->iem.s.enmEffOpSize) 12036 12036 { 12037 12037 case IEMMODE_16BIT: 12038 switch (p IemCpu->enmEffAddrMode)12038 switch (pVCpu->iem.s.enmEffAddrMode) 12039 12039 { 12040 12040 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16); … … 12045 12045 break; 12046 12046 case IEMMODE_32BIT: 12047 switch (p IemCpu->enmEffAddrMode)12047 switch (pVCpu->iem.s.enmEffAddrMode) 12048 12048 { 12049 12049 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16); … … 12053 12053 } 12054 12054 case IEMMODE_64BIT: 12055 switch (p IemCpu->enmEffAddrMode)12055 switch (pVCpu->iem.s.enmEffAddrMode) 12056 12056 { 12057 12057 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_6); /** @todo It's this wrong, we can do 16-bit addressing in 64-bit mode, but not 32-bit. right? 
*/ … … 12063 12063 } 12064 12064 } 12065 if (p IemCpu->fPrefixes & IEM_OP_PRF_REPNZ)12065 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REPNZ) 12066 12066 { 12067 12067 IEMOP_MNEMONIC("repne scas rAX,Xv"); 12068 switch (p IemCpu->enmEffOpSize)12068 switch (pVCpu->iem.s.enmEffOpSize) 12069 12069 { 12070 12070 case IEMMODE_16BIT: 12071 switch (p IemCpu->enmEffAddrMode)12071 switch (pVCpu->iem.s.enmEffAddrMode) 12072 12072 { 12073 12073 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16); … … 12078 12078 break; 12079 12079 case IEMMODE_32BIT: 12080 switch (p IemCpu->enmEffAddrMode)12080 switch (pVCpu->iem.s.enmEffAddrMode) 12081 12081 { 12082 12082 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16); … … 12086 12086 } 12087 12087 case IEMMODE_64BIT: 12088 switch (p IemCpu->enmEffAddrMode)12088 switch (pVCpu->iem.s.enmEffAddrMode) 12089 12089 { 12090 12090 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_5); … … 12102 12102 * Using ugly macro for implementing the cases, sharing it with scasb. 12103 12103 */ 12104 switch (p IemCpu->enmEffOpSize)12104 switch (pVCpu->iem.s.enmEffOpSize) 12105 12105 { 12106 12106 case IEMMODE_16BIT: 12107 switch (p IemCpu->enmEffAddrMode)12107 switch (pVCpu->iem.s.enmEffAddrMode) 12108 12108 { 12109 12109 case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break; … … 12115 12115 12116 12116 case IEMMODE_32BIT: 12117 switch (p IemCpu->enmEffAddrMode)12117 switch (pVCpu->iem.s.enmEffAddrMode) 12118 12118 { 12119 12119 case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break; … … 12125 12125 12126 12126 case IEMMODE_64BIT: 12127 switch (p IemCpu->enmEffAddrMode)12127 switch (pVCpu->iem.s.enmEffAddrMode) 12128 12128 { 12129 12129 case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break; … … 12162 12162 { 12163 12163 IEMOP_MNEMONIC("mov AL,Ib"); 12164 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | p IemCpu->uRexB);12164 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pVCpu->iem.s.uRexB); 12165 12165 } 12166 12166 … … 12170 12170 { 12171 12171 IEMOP_MNEMONIC("mov CL,Ib"); 12172 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | p IemCpu->uRexB);12172 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pVCpu->iem.s.uRexB); 12173 12173 } 12174 12174 … … 12178 12178 { 12179 12179 IEMOP_MNEMONIC("mov DL,Ib"); 12180 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | p IemCpu->uRexB);12180 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pVCpu->iem.s.uRexB); 12181 12181 } 12182 12182 … … 12186 12186 { 12187 12187 IEMOP_MNEMONIC("mov BL,Ib"); 12188 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | p IemCpu->uRexB);12188 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pVCpu->iem.s.uRexB); 12189 12189 } 12190 12190 … … 12194 12194 { 12195 12195 IEMOP_MNEMONIC("mov AH,Ib"); 12196 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | p IemCpu->uRexB);12196 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pVCpu->iem.s.uRexB); 12197 12197 } 12198 12198 … … 12202 12202 { 12203 12203 IEMOP_MNEMONIC("mov CH,Ib"); 12204 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | p IemCpu->uRexB);12204 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pVCpu->iem.s.uRexB); 12205 12205 } 12206 12206 … … 12210 12210 { 12211 12211 IEMOP_MNEMONIC("mov DH,Ib"); 12212 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | p IemCpu->uRexB);12212 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pVCpu->iem.s.uRexB); 12213 
12213 } 12214 12214 … … 12218 12218 { 12219 12219 IEMOP_MNEMONIC("mov BH,Ib"); 12220 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | p IemCpu->uRexB);12220 return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pVCpu->iem.s.uRexB); 12221 12221 } 12222 12222 … … 12227 12227 FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg) 12228 12228 { 12229 switch (p IemCpu->enmEffOpSize)12229 switch (pVCpu->iem.s.enmEffOpSize) 12230 12230 { 12231 12231 case IEMMODE_16BIT: … … 12276 12276 { 12277 12277 IEMOP_MNEMONIC("mov rAX,IV"); 12278 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | p IemCpu->uRexB);12278 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pVCpu->iem.s.uRexB); 12279 12279 } 12280 12280 … … 12284 12284 { 12285 12285 IEMOP_MNEMONIC("mov rCX,IV"); 12286 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | p IemCpu->uRexB);12286 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pVCpu->iem.s.uRexB); 12287 12287 } 12288 12288 … … 12292 12292 { 12293 12293 IEMOP_MNEMONIC("mov rDX,IV"); 12294 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | p IemCpu->uRexB);12294 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pVCpu->iem.s.uRexB); 12295 12295 } 12296 12296 … … 12300 12300 { 12301 12301 IEMOP_MNEMONIC("mov rBX,IV"); 12302 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | p IemCpu->uRexB);12302 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pVCpu->iem.s.uRexB); 12303 12303 } 12304 12304 … … 12308 12308 { 12309 12309 IEMOP_MNEMONIC("mov rSP,IV"); 12310 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | p IemCpu->uRexB);12310 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pVCpu->iem.s.uRexB); 12311 12311 } 12312 12312 … … 12316 12316 { 12317 12317 IEMOP_MNEMONIC("mov rBP,IV"); 12318 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | p IemCpu->uRexB);12318 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pVCpu->iem.s.uRexB); 12319 12319 } 12320 12320 … … 12324 12324 { 12325 12325 IEMOP_MNEMONIC("mov rSI,IV"); 12326 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | p IemCpu->uRexB);12326 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pVCpu->iem.s.uRexB); 12327 12327 } 12328 12328 … … 12332 12332 { 12333 12333 IEMOP_MNEMONIC("mov rDI,IV"); 12334 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | p IemCpu->uRexB);12334 return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pVCpu->iem.s.uRexB); 12335 12335 } 12336 12336 … … 12365 12365 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1); 12366 12366 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12367 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12367 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12368 12368 IEM_MC_REF_EFLAGS(pEFlags); 12369 12369 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); … … 12384 12384 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 12385 12385 IEM_MC_ASSIGN(cShiftArg, cShift); 12386 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12386 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12387 12387 IEM_MC_FETCH_EFLAGS(EFlags); 12388 12388 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); … … 12422 12422 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 12423 12423 IEMOP_HLP_NO_LOCK_PREFIX(); 12424 switch (p IemCpu->enmEffOpSize)12424 switch (pVCpu->iem.s.enmEffOpSize) 12425 12425 { 12426 12426 case 
IEMMODE_16BIT: … … 12429 12429 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1); 12430 12430 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12431 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12431 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12432 12432 IEM_MC_REF_EFLAGS(pEFlags); 12433 12433 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); … … 12441 12441 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1); 12442 12442 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12443 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12443 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12444 12444 IEM_MC_REF_EFLAGS(pEFlags); 12445 12445 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); … … 12454 12454 IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1); 12455 12455 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12456 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12456 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12457 12457 IEM_MC_REF_EFLAGS(pEFlags); 12458 12458 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); … … 12468 12468 /* memory */ 12469 12469 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ 12470 switch (p IemCpu->enmEffOpSize)12470 switch (pVCpu->iem.s.enmEffOpSize) 12471 12471 { 12472 12472 case IEMMODE_16BIT: … … 12480 12480 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 12481 12481 IEM_MC_ASSIGN(cShiftArg, cShift); 12482 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12482 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12483 12483 IEM_MC_FETCH_EFLAGS(EFlags); 12484 12484 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); … … 12500 12500 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 12501 12501 IEM_MC_ASSIGN(cShiftArg, cShift); 12502 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12502 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12503 12503 IEM_MC_FETCH_EFLAGS(EFlags); 12504 12504 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); … … 12520 12520 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift); 12521 12521 IEM_MC_ASSIGN(cShiftArg, cShift); 12522 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12522 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12523 12523 IEM_MC_FETCH_EFLAGS(EFlags); 12524 12524 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); … … 12543 12543 IEMOP_HLP_NO_LOCK_PREFIX(); 12544 12544 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 12545 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, p IemCpu->enmEffOpSize, u16Imm);12545 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pVCpu->iem.s.enmEffOpSize, u16Imm); 12546 12546 } 12547 12547 … … 12553 12553 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 12554 12554 IEMOP_HLP_NO_LOCK_PREFIX(); 12555 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, p IemCpu->enmEffOpSize, 0);12555 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pVCpu->iem.s.enmEffOpSize, 0); 12556 12556 } 12557 12557 … … 12561 12561 { 12562 12562 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 12563 if ( p IemCpu->enmCpuMode == IEMMODE_64BIT12563 if ( pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT 12564 12564 || 
(bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 12565 12565 { … … 12588 12588 outside of 64-bit mode. VEX is not available in real or v86 mode. */ 12589 12589 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 12590 if (p IemCpu->enmCpuMode != IEMMODE_64BIT)12590 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) 12591 12591 { 12592 12592 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) … … 12605 12605 uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode); 12606 12606 #if 0 /* will make sense of this next week... */ 12607 if ( !(p IemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX))12607 if ( !(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) 12608 12608 && 12609 12609 ) … … 12632 12632 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 12633 12633 IEM_MC_BEGIN(0, 0); 12634 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u8Imm);12634 IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u8Imm); 12635 12635 IEM_MC_ADVANCE_RIP(); 12636 12636 IEM_MC_END(); … … 12643 12643 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); 12644 12644 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 12645 IEM_MC_STORE_MEM_U8(p IemCpu->iEffSeg, GCPtrEffDst, u8Imm);12645 IEM_MC_STORE_MEM_U8(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u8Imm); 12646 12646 IEM_MC_ADVANCE_RIP(); 12647 12647 IEM_MC_END(); … … 12663 12663 { 12664 12664 /* register access */ 12665 switch (p IemCpu->enmEffOpSize)12665 switch (pVCpu->iem.s.enmEffOpSize) 12666 12666 { 12667 12667 case IEMMODE_16BIT: 12668 12668 IEM_MC_BEGIN(0, 0); 12669 12669 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 12670 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u16Imm);12670 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u16Imm); 12671 12671 IEM_MC_ADVANCE_RIP(); 12672 12672 IEM_MC_END(); … … 12676 12676 IEM_MC_BEGIN(0, 0); 12677 12677 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); 12678 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u32Imm);12678 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u32Imm); 12679 12679 IEM_MC_ADVANCE_RIP(); 12680 12680 IEM_MC_END(); … … 12684 12684 IEM_MC_BEGIN(0, 0); 12685 12685 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); 12686 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB, u64Imm);12686 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, u64Imm); 12687 12687 IEM_MC_ADVANCE_RIP(); 12688 12688 IEM_MC_END(); … … 12695 12695 { 12696 12696 /* memory access. 
*/ 12697 switch (p IemCpu->enmEffOpSize)12697 switch (pVCpu->iem.s.enmEffOpSize) 12698 12698 { 12699 12699 case IEMMODE_16BIT: … … 12702 12702 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); 12703 12703 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 12704 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Imm);12704 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Imm); 12705 12705 IEM_MC_ADVANCE_RIP(); 12706 12706 IEM_MC_END(); … … 12712 12712 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); 12713 12713 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); 12714 IEM_MC_STORE_MEM_U32(p IemCpu->iEffSeg, GCPtrEffDst, u32Imm);12714 IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Imm); 12715 12715 IEM_MC_ADVANCE_RIP(); 12716 12716 IEM_MC_END(); … … 12722 12722 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); 12723 12723 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); 12724 IEM_MC_STORE_MEM_U64(p IemCpu->iEffSeg, GCPtrEffDst, u64Imm);12724 IEM_MC_STORE_MEM_U64(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u64Imm); 12725 12725 IEM_MC_ADVANCE_RIP(); 12726 12726 IEM_MC_END(); … … 12744 12744 uint16_t cbFrame; IEM_OPCODE_GET_NEXT_U16(&cbFrame); 12745 12745 uint8_t u8NestingLevel; IEM_OPCODE_GET_NEXT_U8(&u8NestingLevel); 12746 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, p IemCpu->enmEffOpSize, cbFrame, u8NestingLevel);12746 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, pVCpu->iem.s.enmEffOpSize, cbFrame, u8NestingLevel); 12747 12747 } 12748 12748 … … 12755 12755 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 12756 12756 IEMOP_HLP_NO_LOCK_PREFIX(); 12757 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, p IemCpu->enmEffOpSize);12757 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pVCpu->iem.s.enmEffOpSize); 12758 12758 } 12759 12759 … … 12766 12766 IEMOP_HLP_NO_LOCK_PREFIX(); 12767 12767 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 12768 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, p IemCpu->enmEffOpSize, u16Imm);12768 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, u16Imm); 12769 12769 } 12770 12770 … … 12776 12776 IEMOP_HLP_NO_LOCK_PREFIX(); 12777 12777 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 12778 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, p IemCpu->enmEffOpSize, 0);12778 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, 0); 12779 12779 } 12780 12780 … … 12817 12817 IEMOP_MNEMONIC("iret"); 12818 12818 IEMOP_HLP_NO_LOCK_PREFIX(); 12819 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, p IemCpu->enmEffOpSize);12819 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pVCpu->iem.s.enmEffOpSize); 12820 12820 } 12821 12821 … … 12848 12848 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=*/1, 1); 12849 12849 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12850 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12850 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12851 12851 IEM_MC_REF_EFLAGS(pEFlags); 12852 12852 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); … … 12865 12865 12866 12866 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 12867 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12867 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12868 12868 IEM_MC_FETCH_EFLAGS(EFlags); 12869 12869 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); … … 12902 12902 /* register */ 12903 12903 IEMOP_HLP_NO_LOCK_PREFIX(); 12904 switch (p IemCpu->enmEffOpSize)12904 switch (pVCpu->iem.s.enmEffOpSize) 12905 12905 { 12906 12906 case 
IEMMODE_16BIT: … … 12909 12909 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1); 12910 12910 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12911 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12911 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12912 12912 IEM_MC_REF_EFLAGS(pEFlags); 12913 12913 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); … … 12921 12921 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1); 12922 12922 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12923 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12923 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12924 12924 IEM_MC_REF_EFLAGS(pEFlags); 12925 12925 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); … … 12934 12934 IEM_MC_ARG_CONST(uint8_t, cShiftArg,/*=1*/1, 1); 12935 12935 IEM_MC_ARG(uint32_t *, pEFlags, 2); 12936 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);12936 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 12937 12937 IEM_MC_REF_EFLAGS(pEFlags); 12938 12938 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); … … 12948 12948 /* memory */ 12949 12949 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ 12950 switch (p IemCpu->enmEffOpSize)12950 switch (pVCpu->iem.s.enmEffOpSize) 12951 12951 { 12952 12952 case IEMMODE_16BIT: … … 12958 12958 12959 12959 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 12960 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12960 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12961 12961 IEM_MC_FETCH_EFLAGS(EFlags); 12962 12962 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); … … 12976 12976 12977 12977 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 12978 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12978 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12979 12979 IEM_MC_FETCH_EFLAGS(EFlags); 12980 12980 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); … … 12994 12994 12995 12995 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 12996 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);12996 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12997 12997 IEM_MC_FETCH_EFLAGS(EFlags); 12998 12998 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); … … 13037 13037 IEM_MC_ARG(uint8_t, cShiftArg, 1); 13038 13038 IEM_MC_ARG(uint32_t *, pEFlags, 2); 13039 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);13039 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 13040 13040 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13041 13041 IEM_MC_REF_EFLAGS(pEFlags); … … 13056 13056 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 13057 13057 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13058 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);13058 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 13059 13059 IEM_MC_FETCH_EFLAGS(EFlags); 13060 13060 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); … … 13092 13092 /* register */ 13093 13093 IEMOP_HLP_NO_LOCK_PREFIX(); 
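/* Register-operand path of the Ev,CL shift/rotate group handler: the switch that follows picks the 16-, 32- or 64-bit worker from pImpl, fetches the shift count from CL, and resolves the destination register from the ModR/M rm field OR'ed with the REX.B extension. */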
13094 switch (p IemCpu->enmEffOpSize)13094 switch (pVCpu->iem.s.enmEffOpSize) 13095 13095 { 13096 13096 case IEMMODE_16BIT: … … 13099 13099 IEM_MC_ARG(uint8_t, cShiftArg, 1); 13100 13100 IEM_MC_ARG(uint32_t *, pEFlags, 2); 13101 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);13101 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 13102 13102 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13103 13103 IEM_MC_REF_EFLAGS(pEFlags); … … 13112 13112 IEM_MC_ARG(uint8_t, cShiftArg, 1); 13113 13113 IEM_MC_ARG(uint32_t *, pEFlags, 2); 13114 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);13114 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 13115 13115 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13116 13116 IEM_MC_REF_EFLAGS(pEFlags); … … 13126 13126 IEM_MC_ARG(uint8_t, cShiftArg, 1); 13127 13127 IEM_MC_ARG(uint32_t *, pEFlags, 2); 13128 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);13128 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 13129 13129 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13130 13130 IEM_MC_REF_EFLAGS(pEFlags); … … 13141 13141 /* memory */ 13142 13142 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ 13143 switch (p IemCpu->enmEffOpSize)13143 switch (pVCpu->iem.s.enmEffOpSize) 13144 13144 { 13145 13145 case IEMMODE_16BIT: … … 13152 13152 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 13153 13153 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13154 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);13154 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 13155 13155 IEM_MC_FETCH_EFLAGS(EFlags); 13156 13156 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); … … 13171 13171 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 13172 13172 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13173 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);13173 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 13174 13174 IEM_MC_FETCH_EFLAGS(EFlags); 13175 13175 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); … … 13190 13190 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 13191 13191 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); 13192 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);13192 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 13193 13193 IEM_MC_FETCH_EFLAGS(EFlags); 13194 13194 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); … … 13255 13255 IEMOP_MNEMONIC("xlat"); 13256 13256 IEMOP_HLP_NO_LOCK_PREFIX(); 13257 switch (p IemCpu->enmEffAddrMode)13257 switch (pVCpu->iem.s.enmEffAddrMode) 13258 13258 { 13259 13259 case IEMMODE_16BIT: … … 13263 13263 IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX); 13264 13264 IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX); 13265 IEM_MC_FETCH_MEM16_U8(u8Tmp, p IemCpu->iEffSeg, u16Addr);13265 IEM_MC_FETCH_MEM16_U8(u8Tmp, pVCpu->iem.s.iEffSeg, u16Addr); 13266 13266 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp); 13267 13267 IEM_MC_ADVANCE_RIP(); … … 13275 13275 IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX); 13276 13276 IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX); 13277 IEM_MC_FETCH_MEM32_U8(u8Tmp, p IemCpu->iEffSeg, 
u32Addr);13277 IEM_MC_FETCH_MEM32_U8(u8Tmp, pVCpu->iem.s.iEffSeg, u32Addr); 13278 13278 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp); 13279 13279 IEM_MC_ADVANCE_RIP(); … … 13287 13287 IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX); 13288 13288 IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX); 13289 IEM_MC_FETCH_MEM_U8(u8Tmp, p IemCpu->iEffSeg, u64Addr);13289 IEM_MC_FETCH_MEM_U8(u8Tmp, pVCpu->iem.s.iEffSeg, u64Addr); 13290 13290 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp); 13291 13291 IEM_MC_ADVANCE_RIP(); … … 13479 13479 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13480 13480 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13481 IEM_MC_FETCH_MEM_R32(r32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);13481 IEM_MC_FETCH_MEM_R32(r32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13482 13482 13483 13483 IEM_MC_PREPARE_FPU_USAGE(); … … 13529 13529 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13530 13530 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13531 IEM_MC_FETCH_MEM_R32(r32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);13531 IEM_MC_FETCH_MEM_R32(r32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13532 13532 13533 13533 IEM_MC_PREPARE_FPU_USAGE(); 13534 13534 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 13535 13535 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2); 13536 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);13536 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13537 13537 IEM_MC_ELSE() 13538 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);13538 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13539 13539 IEM_MC_ENDIF(); 13540 13540 IEM_MC_ADVANCE_RIP(); … … 13563 13563 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13564 13564 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13565 IEM_MC_FETCH_MEM_R32(r32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);13565 IEM_MC_FETCH_MEM_R32(r32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13566 13566 13567 13567 IEM_MC_PREPARE_FPU_USAGE(); 13568 13568 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 13569 13569 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2); 13570 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);13570 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13571 13571 IEM_MC_ELSE() 13572 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);13572 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13573 13573 IEM_MC_ENDIF(); 13574 13574 IEM_MC_ADVANCE_RIP(); … … 13614 13614 FNIEMOP_DEF(iemOp_EscF0) 13615 13615 { 13616 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;13616 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 13617 13617 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 13618 13618 … … 13668 13668 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13669 13669 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13670 IEM_MC_FETCH_MEM_R32(r32Val, p IemCpu->iEffSeg, GCPtrEffSrc);13670 IEM_MC_FETCH_MEM_R32(r32Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13671 13671 13672 13672 IEM_MC_PREPARE_FPU_USAGE(); 13673 13673 IEM_MC_IF_FPUREG_IS_EMPTY(7) 13674 13674 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val); 13675 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);13675 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13676 13676 IEM_MC_ELSE() 13677 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);13677 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, 
GCPtrEffSrc); 13678 13678 IEM_MC_ENDIF(); 13679 13679 IEM_MC_ADVANCE_RIP(); … … 13700 13700 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13701 13701 13702 IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);13702 IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 13703 13703 IEM_MC_PREPARE_FPU_USAGE(); 13704 13704 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 13705 13705 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value); 13706 13706 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw); 13707 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);13707 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 13708 13708 IEM_MC_ELSE() 13709 13709 IEM_MC_IF_FCW_IM() … … 13711 13711 IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W); 13712 13712 IEM_MC_ENDIF(); 13713 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);13713 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 13714 13714 IEM_MC_ENDIF(); 13715 13715 IEM_MC_ADVANCE_RIP(); … … 13736 13736 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 13737 13737 13738 IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);13738 IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 13739 13739 IEM_MC_PREPARE_FPU_USAGE(); 13740 13740 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 13741 13741 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value); 13742 13742 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw); 13743 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);13743 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 13744 13744 IEM_MC_ELSE() 13745 13745 IEM_MC_IF_FCW_IM() … … 13747 13747 IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W); 13748 13748 IEM_MC_ENDIF(); 13749 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);13749 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 13750 13750 IEM_MC_ENDIF(); 13751 13751 IEM_MC_ADVANCE_RIP(); … … 13761 13761 IEMOP_MNEMONIC("fldenv m14/28byte"); 13762 13762 IEM_MC_BEGIN(3, 0); 13763 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ p IemCpu->enmEffOpSize, 0);13763 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pVCpu->iem.s.enmEffOpSize, 0); 13764 13764 IEM_MC_ARG(uint8_t, iEffSeg, 1); 13765 13765 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2); … … 13768 13768 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13769 13769 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 13770 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);13770 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 13771 13771 IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc); 13772 13772 IEM_MC_END(); … … 13786 13786 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13787 13787 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 13788 IEM_MC_FETCH_MEM_U16(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);13788 IEM_MC_FETCH_MEM_U16(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 13789 13789 IEM_MC_CALL_CIMPL_1(iemCImpl_fldcw, u16Fsw); 13790 13790 IEM_MC_END(); … … 13798 13798 IEMOP_MNEMONIC("fstenv m14/m28byte"); 13799 13799 IEM_MC_BEGIN(3, 0); 13800 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ p IemCpu->enmEffOpSize, 0);13800 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pVCpu->iem.s.enmEffOpSize, 0); 13801 13801 IEM_MC_ARG(uint8_t, iEffSeg, 1); 
13802 13802 IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2); … … 13805 13805 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 13806 13806 IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 13807 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);13807 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 13808 13808 IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst); 13809 13809 IEM_MC_END(); … … 13824 13824 IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 13825 13825 IEM_MC_FETCH_FCW(u16Fcw); 13826 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Fcw);13826 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Fcw); 13827 13827 IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */ 13828 13828 IEM_MC_END(); … … 14400 14400 FNIEMOP_DEF(iemOp_EscF1) 14401 14401 { 14402 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;14402 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 14403 14403 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 14404 14404 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 14614 14614 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 14615 14615 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14616 IEM_MC_FETCH_MEM_I32(i32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);14616 IEM_MC_FETCH_MEM_I32(i32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14617 14617 14618 14618 IEM_MC_PREPARE_FPU_USAGE(); … … 14664 14664 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 14665 14665 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14666 IEM_MC_FETCH_MEM_I32(i32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);14666 IEM_MC_FETCH_MEM_I32(i32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14667 14667 14668 14668 IEM_MC_PREPARE_FPU_USAGE(); 14669 14669 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 14670 14670 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2); 14671 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);14671 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14672 14672 IEM_MC_ELSE() 14673 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);14673 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14674 14674 IEM_MC_ENDIF(); 14675 14675 IEM_MC_ADVANCE_RIP(); … … 14698 14698 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 14699 14699 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14700 IEM_MC_FETCH_MEM_I32(i32Val2, p IemCpu->iEffSeg, GCPtrEffSrc);14700 IEM_MC_FETCH_MEM_I32(i32Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14701 14701 14702 14702 IEM_MC_PREPARE_FPU_USAGE(); 14703 14703 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 14704 14704 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2); 14705 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);14705 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14706 14706 IEM_MC_ELSE() 14707 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);14707 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14708 14708 IEM_MC_ENDIF(); 14709 14709 IEM_MC_ADVANCE_RIP(); … … 14749 14749 FNIEMOP_DEF(iemOp_EscF2) 14750 14750 { 14751 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;14751 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 14752 14752 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 14753 14753 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 14804 14804 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 14805 14805 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14806 IEM_MC_FETCH_MEM_I32(i32Val, p IemCpu->iEffSeg, 
GCPtrEffSrc);14806 IEM_MC_FETCH_MEM_I32(i32Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14807 14807 14808 14808 IEM_MC_PREPARE_FPU_USAGE(); 14809 14809 IEM_MC_IF_FPUREG_IS_EMPTY(7) 14810 14810 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i32_to_r80, pFpuRes, pi32Val); 14811 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);14811 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14812 14812 IEM_MC_ELSE() 14813 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);14813 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14814 14814 IEM_MC_ENDIF(); 14815 14815 IEM_MC_ADVANCE_RIP(); … … 14836 14836 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14837 14837 14838 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);14838 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 14839 14839 IEM_MC_PREPARE_FPU_USAGE(); 14840 14840 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 14841 14841 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i32, pu16Fsw, pi32Dst, pr80Value); 14842 14842 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw); 14843 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);14843 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14844 14844 IEM_MC_ELSE() 14845 14845 IEM_MC_IF_FCW_IM() … … 14847 14847 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W); 14848 14848 IEM_MC_ENDIF(); 14849 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);14849 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14850 14850 IEM_MC_ENDIF(); 14851 14851 IEM_MC_ADVANCE_RIP(); … … 14872 14872 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14873 14873 14874 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);14874 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 14875 14875 IEM_MC_PREPARE_FPU_USAGE(); 14876 14876 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 14877 14877 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value); 14878 14878 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw); 14879 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);14879 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14880 14880 IEM_MC_ELSE() 14881 14881 IEM_MC_IF_FCW_IM() … … 14883 14883 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W); 14884 14884 IEM_MC_ENDIF(); 14885 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);14885 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14886 14886 IEM_MC_ENDIF(); 14887 14887 IEM_MC_ADVANCE_RIP(); … … 14908 14908 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14909 14909 14910 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);14910 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 14911 14911 IEM_MC_PREPARE_FPU_USAGE(); 14912 14912 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 14913 14913 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value); 14914 14914 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw); 14915 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);14915 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 
14916 14916 IEM_MC_ELSE() 14917 14917 IEM_MC_IF_FCW_IM() … … 14919 14919 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W); 14920 14920 IEM_MC_ENDIF(); 14921 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);14921 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14922 14922 IEM_MC_ENDIF(); 14923 14923 IEM_MC_ADVANCE_RIP(); … … 14945 14945 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 14946 14946 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14947 IEM_MC_FETCH_MEM_R80(r80Val, p IemCpu->iEffSeg, GCPtrEffSrc);14947 IEM_MC_FETCH_MEM_R80(r80Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14948 14948 14949 14949 IEM_MC_PREPARE_FPU_USAGE(); 14950 14950 IEM_MC_IF_FPUREG_IS_EMPTY(7) 14951 14951 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r80_from_r80, pFpuRes, pr80Val); 14952 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);14952 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14953 14953 IEM_MC_ELSE() 14954 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);14954 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 14955 14955 IEM_MC_ENDIF(); 14956 14956 IEM_MC_ADVANCE_RIP(); … … 14977 14977 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 14978 14978 14979 IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);14979 IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 14980 14980 IEM_MC_PREPARE_FPU_USAGE(); 14981 14981 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 14982 14982 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r80, pu16Fsw, pr80Dst, pr80Value); 14983 14983 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr80Dst, IEM_ACCESS_DATA_W, u16Fsw); 14984 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);14984 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14985 14985 IEM_MC_ELSE() 14986 14986 IEM_MC_IF_FCW_IM() … … 14988 14988 IEM_MC_MEM_COMMIT_AND_UNMAP(pr80Dst, IEM_ACCESS_DATA_W); 14989 14989 IEM_MC_ENDIF(); 14990 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);14990 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 14991 14991 IEM_MC_ENDIF(); 14992 14992 IEM_MC_ADVANCE_RIP(); … … 15209 15209 FNIEMOP_DEF(iemOp_EscF3) 15210 15210 { 15211 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;15211 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 15212 15212 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15213 15213 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 15359 15359 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15360 15360 15361 IEM_MC_FETCH_MEM_R64(r64Factor2, p IemCpu->iEffSeg, GCPtrEffSrc);15361 IEM_MC_FETCH_MEM_R64(r64Factor2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15362 15362 IEM_MC_PREPARE_FPU_USAGE(); 15363 15363 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Factor1, 0) 15364 15364 IEM_MC_CALL_FPU_AIMPL_3(pfnImpl, pFpuRes, pr80Factor1, pr64Factor2); 15365 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, p IemCpu->iEffSeg, GCPtrEffSrc);15365 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15366 15366 IEM_MC_ELSE() 15367 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, p IemCpu->iEffSeg, GCPtrEffSrc);15367 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15368 15368 IEM_MC_ENDIF(); 15369 15369 IEM_MC_ADVANCE_RIP(); … … 15408 15408 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15409 15409 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15410 
IEM_MC_FETCH_MEM_R64(r64Val2, p IemCpu->iEffSeg, GCPtrEffSrc);15410 IEM_MC_FETCH_MEM_R64(r64Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15411 15411 15412 15412 IEM_MC_PREPARE_FPU_USAGE(); 15413 15413 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 15414 15414 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2); 15415 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);15415 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15416 15416 IEM_MC_ELSE() 15417 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);15417 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15418 15418 IEM_MC_ENDIF(); 15419 15419 IEM_MC_ADVANCE_RIP(); … … 15442 15442 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15443 15443 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15444 IEM_MC_FETCH_MEM_R64(r64Val2, p IemCpu->iEffSeg, GCPtrEffSrc);15444 IEM_MC_FETCH_MEM_R64(r64Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15445 15445 15446 15446 IEM_MC_PREPARE_FPU_USAGE(); 15447 15447 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 15448 15448 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2); 15449 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);15449 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15450 15450 IEM_MC_ELSE() 15451 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);15451 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15452 15452 IEM_MC_ENDIF(); 15453 15453 IEM_MC_ADVANCE_RIP(); … … 15493 15493 FNIEMOP_DEF(iemOp_EscF4) 15494 15494 { 15495 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;15495 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 15496 15496 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15497 15497 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 15546 15546 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15547 15547 15548 IEM_MC_FETCH_MEM_R64(r64Val, p IemCpu->iEffSeg, GCPtrEffSrc);15548 IEM_MC_FETCH_MEM_R64(r64Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15549 15549 IEM_MC_PREPARE_FPU_USAGE(); 15550 15550 IEM_MC_IF_FPUREG_IS_EMPTY(7) 15551 15551 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r64_to_r80, pFpuRes, pr64Val); 15552 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);15552 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15553 15553 IEM_MC_ELSE() 15554 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);15554 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15555 15555 IEM_MC_ENDIF(); 15556 15556 IEM_MC_ADVANCE_RIP(); … … 15577 15577 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15578 15578 15579 IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);15579 IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 15580 15580 IEM_MC_PREPARE_FPU_USAGE(); 15581 15581 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 15582 15582 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i64, pu16Fsw, pi64Dst, pr80Value); 15583 15583 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw); 15584 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);15584 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15585 15585 IEM_MC_ELSE() 15586 15586 IEM_MC_IF_FCW_IM() … … 15588 15588 IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W); 
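/* ST(0)-empty path of the fistt store above: with FCW.IM set the mapped 64-bit destination is still committed (the store of the indefinite value sits in the lines elided by the diff), and either way the stack-underflow FSW update and pop that follow are performed. */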
15589 15589 IEM_MC_ENDIF(); 15590 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);15590 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15591 15591 IEM_MC_ENDIF(); 15592 15592 IEM_MC_ADVANCE_RIP(); … … 15613 15613 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15614 15614 15615 IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);15615 IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 15616 15616 IEM_MC_PREPARE_FPU_USAGE(); 15617 15617 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 15618 15618 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value); 15619 15619 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw); 15620 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);15620 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15621 15621 IEM_MC_ELSE() 15622 15622 IEM_MC_IF_FCW_IM() … … 15624 15624 IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W); 15625 15625 IEM_MC_ENDIF(); 15626 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);15626 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15627 15627 IEM_MC_ENDIF(); 15628 15628 IEM_MC_ADVANCE_RIP(); … … 15651 15651 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15652 15652 15653 IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);15653 IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 15654 15654 IEM_MC_PREPARE_FPU_USAGE(); 15655 15655 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 15656 15656 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value); 15657 15657 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw); 15658 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);15658 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15659 15659 IEM_MC_ELSE() 15660 15660 IEM_MC_IF_FCW_IM() … … 15662 15662 IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W); 15663 15663 IEM_MC_ENDIF(); 15664 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);15664 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 15665 15665 IEM_MC_ENDIF(); 15666 15666 IEM_MC_ADVANCE_RIP(); … … 15676 15676 IEMOP_MNEMONIC("frstor m94/108byte"); 15677 15677 IEM_MC_BEGIN(3, 0); 15678 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ p IemCpu->enmEffOpSize, 0);15678 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pVCpu->iem.s.enmEffOpSize, 0); 15679 15679 IEM_MC_ARG(uint8_t, iEffSeg, 1); 15680 15680 IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2); … … 15683 15683 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15684 15684 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE(); 15685 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);15685 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 15686 15686 IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc); 15687 15687 IEM_MC_END(); … … 15695 15695 IEMOP_MNEMONIC("fnsave m94/108byte"); 15696 15696 IEM_MC_BEGIN(3, 0); 15697 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ p IemCpu->enmEffOpSize, 0);15697 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pVCpu->iem.s.enmEffOpSize, 0); 15698 15698 IEM_MC_ARG(uint8_t, iEffSeg, 1); 15699 15699 IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2); … … 15702 15702 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15703 15703 
IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 15704 IEM_MC_ASSIGN(iEffSeg, p IemCpu->iEffSeg);15704 IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg); 15705 15705 IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst); 15706 15706 IEM_MC_END(); … … 15724 15724 IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ(); 15725 15725 IEM_MC_FETCH_FSW(u16Tmp); 15726 IEM_MC_STORE_MEM_U16(p IemCpu->iEffSeg, GCPtrEffDst, u16Tmp);15726 IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Tmp); 15727 15727 IEM_MC_ADVANCE_RIP(); 15728 15728 … … 15803 15803 FNIEMOP_DEF(iemOp_EscF5) 15804 15804 { 15805 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;15805 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 15806 15806 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 15807 15807 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 15915 15915 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15916 15916 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15917 IEM_MC_FETCH_MEM_I16(i16Val2, p IemCpu->iEffSeg, GCPtrEffSrc);15917 IEM_MC_FETCH_MEM_I16(i16Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15918 15918 15919 15919 IEM_MC_PREPARE_FPU_USAGE(); … … 15965 15965 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 15966 15966 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 15967 IEM_MC_FETCH_MEM_I16(i16Val2, p IemCpu->iEffSeg, GCPtrEffSrc);15967 IEM_MC_FETCH_MEM_I16(i16Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15968 15968 15969 15969 IEM_MC_PREPARE_FPU_USAGE(); 15970 15970 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 15971 15971 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2); 15972 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);15972 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15973 15973 IEM_MC_ELSE() 15974 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);15974 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 15975 15975 IEM_MC_ENDIF(); 15976 15976 IEM_MC_ADVANCE_RIP(); … … 15999 15999 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 16000 16000 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16001 IEM_MC_FETCH_MEM_I16(i16Val2, p IemCpu->iEffSeg, GCPtrEffSrc);16001 IEM_MC_FETCH_MEM_I16(i16Val2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16002 16002 16003 16003 IEM_MC_PREPARE_FPU_USAGE(); 16004 16004 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0) 16005 16005 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2); 16006 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffSrc);16006 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16007 16007 IEM_MC_ELSE() 16008 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffSrc);16008 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16009 16009 IEM_MC_ENDIF(); 16010 16010 IEM_MC_ADVANCE_RIP(); … … 16050 16050 FNIEMOP_DEF(iemOp_EscF6) 16051 16051 { 16052 p IemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;16052 pVCpu->iem.s.offFpuOpcode = pVCpu->iem.s.offOpcode - 1; 16053 16053 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 16054 16054 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) … … 16161 16161 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 16162 16162 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16163 IEM_MC_FETCH_MEM_I16(i16Val, p IemCpu->iEffSeg, GCPtrEffSrc);16163 IEM_MC_FETCH_MEM_I16(i16Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16164 16164 16165 16165 IEM_MC_PREPARE_FPU_USAGE(); 16166 16166 IEM_MC_IF_FPUREG_IS_EMPTY(7) 16167 16167 
IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i16_to_r80, pFpuRes, pi16Val); 16168 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);16168 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16169 16169 IEM_MC_ELSE() 16170 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);16170 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16171 16171 IEM_MC_ENDIF(); 16172 16172 IEM_MC_ADVANCE_RIP(); … … 16193 16193 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16194 16194 16195 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);16195 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 16196 16196 IEM_MC_PREPARE_FPU_USAGE(); 16197 16197 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 16198 16198 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i16, pu16Fsw, pi16Dst, pr80Value); 16199 16199 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw); 16200 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);16200 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16201 16201 IEM_MC_ELSE() 16202 16202 IEM_MC_IF_FCW_IM() … … 16204 16204 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W); 16205 16205 IEM_MC_ENDIF(); 16206 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);16206 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16207 16207 IEM_MC_ENDIF(); 16208 16208 IEM_MC_ADVANCE_RIP(); … … 16229 16229 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16230 16230 16231 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);16231 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 16232 16232 IEM_MC_PREPARE_FPU_USAGE(); 16233 16233 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 16234 16234 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value); 16235 16235 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw); 16236 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);16236 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16237 16237 IEM_MC_ELSE() 16238 16238 IEM_MC_IF_FCW_IM() … … 16240 16240 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W); 16241 16241 IEM_MC_ENDIF(); 16242 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);16242 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16243 16243 IEM_MC_ENDIF(); 16244 16244 IEM_MC_ADVANCE_RIP(); … … 16265 16265 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16266 16266 16267 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);16267 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 16268 16268 IEM_MC_PREPARE_FPU_USAGE(); 16269 16269 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 16270 16270 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value); 16271 16271 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw); 16272 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);16272 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16273 16273 IEM_MC_ELSE() 16274 16274 IEM_MC_IF_FCW_IM() … … 16276 16276 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W); 16277 16277 IEM_MC_ENDIF(); 16278 
IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);16278 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16279 16279 IEM_MC_ENDIF(); 16280 16280 IEM_MC_ADVANCE_RIP(); … … 16306 16306 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE(); 16307 16307 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16308 IEM_MC_FETCH_MEM_I64(i64Val, p IemCpu->iEffSeg, GCPtrEffSrc);16308 IEM_MC_FETCH_MEM_I64(i64Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16309 16309 16310 16310 IEM_MC_PREPARE_FPU_USAGE(); 16311 16311 IEM_MC_IF_FPUREG_IS_EMPTY(7) 16312 16312 IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i64_to_r80, pFpuRes, pi64Val); 16313 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, p IemCpu->iEffSeg, GCPtrEffSrc);16313 IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16314 16314 IEM_MC_ELSE() 16315 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(p IemCpu->iEffSeg, GCPtrEffSrc);16315 IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 16316 16316 IEM_MC_ENDIF(); 16317 16317 IEM_MC_ADVANCE_RIP(); … … 16342 16342 IEM_MC_MAYBE_RAISE_FPU_XCPT(); 16343 16343 16344 IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, p IemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);16344 IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/); 16345 16345 IEM_MC_PREPARE_FPU_USAGE(); 16346 16346 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) 16347 16347 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i64, pu16Fsw, pi64Dst, pr80Value); 16348 16348 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw); 16349 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, p IemCpu->iEffSeg, GCPtrEffDst);16349 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16350 16350 IEM_MC_ELSE() 16351 16351 IEM_MC_IF_FCW_IM() … … 16353 16353 IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W); 16354 16354 IEM_MC_ENDIF(); 16355 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, p IemCpu->iEffSeg, GCPtrEffDst);16355 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 16356 16356 IEM_MC_ENDIF(); 16357 16357 IEM_MC_ADVANCE_RIP(); … … 16409 16409 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 16410 16410 16411 switch (p IemCpu->enmEffAddrMode)16411 switch (pVCpu->iem.s.enmEffAddrMode) 16412 16412 { 16413 16413 case IEMMODE_16BIT: … … 16457 16457 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 16458 16458 16459 switch (p IemCpu->enmEffAddrMode)16459 switch (pVCpu->iem.s.enmEffAddrMode) 16460 16460 { 16461 16461 case IEMMODE_16BIT: … … 16508 16508 * using the 32-bit operand size override. How can that be restarted? See 16509 16509 * weird pseudo code in intel manual. 
*/ 16510 switch (p IemCpu->enmEffAddrMode)16510 switch (pVCpu->iem.s.enmEffAddrMode) 16511 16511 { 16512 16512 case IEMMODE_16BIT: 16513 16513 IEM_MC_BEGIN(0,0); 16514 if (-(int8_t)p IemCpu->offOpcode != i8Imm)16514 if (-(int8_t)pVCpu->iem.s.offOpcode != i8Imm) 16515 16515 { 16516 16516 IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1); … … 16531 16531 case IEMMODE_32BIT: 16532 16532 IEM_MC_BEGIN(0,0); 16533 if (-(int8_t)p IemCpu->offOpcode != i8Imm)16533 if (-(int8_t)pVCpu->iem.s.offOpcode != i8Imm) 16534 16534 { 16535 16535 IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1); … … 16550 16550 case IEMMODE_64BIT: 16551 16551 IEM_MC_BEGIN(0,0); 16552 if (-(int8_t)p IemCpu->offOpcode != i8Imm)16552 if (-(int8_t)pVCpu->iem.s.offOpcode != i8Imm) 16553 16553 { 16554 16554 IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1); … … 16580 16580 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 16581 16581 16582 switch (p IemCpu->enmEffAddrMode)16582 switch (pVCpu->iem.s.enmEffAddrMode) 16583 16583 { 16584 16584 case IEMMODE_16BIT: … … 16633 16633 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 16634 16634 IEMOP_HLP_NO_LOCK_PREFIX(); 16635 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, p IemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);16635 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT ? 2 : 4); 16636 16636 } 16637 16637 … … 16653 16653 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 16654 16654 IEMOP_HLP_NO_LOCK_PREFIX(); 16655 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, p IemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);16655 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT ? 2 : 4); 16656 16656 } 16657 16657 … … 16662 16662 IEMOP_MNEMONIC("call Jv"); 16663 16663 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 16664 switch (p IemCpu->enmEffOpSize)16664 switch (pVCpu->iem.s.enmEffOpSize) 16665 16665 { 16666 16666 case IEMMODE_16BIT: … … 16692 16692 IEMOP_MNEMONIC("jmp Jv"); 16693 16693 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 16694 switch (p IemCpu->enmEffOpSize)16694 switch (pVCpu->iem.s.enmEffOpSize) 16695 16695 { 16696 16696 case IEMMODE_16BIT: … … 16726 16726 /* Decode the far pointer address and pass it on to the far call C implementation. */ 16727 16727 uint32_t offSeg; 16728 if (p IemCpu->enmEffOpSize != IEMMODE_16BIT)16728 if (pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT) 16729 16729 IEM_OPCODE_GET_NEXT_U32(&offSeg); 16730 16730 else … … 16732 16732 uint16_t uSel; IEM_OPCODE_GET_NEXT_U16(&uSel); 16733 16733 IEMOP_HLP_NO_LOCK_PREFIX(); 16734 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, p IemCpu->enmEffOpSize);16734 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pVCpu->iem.s.enmEffOpSize); 16735 16735 } 16736 16736 … … 16765 16765 IEMOP_MNEMONIC("in eAX,DX"); 16766 16766 IEMOP_HLP_NO_LOCK_PREFIX(); 16767 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, p IemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);16767 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT ? 2 : 4); 16768 16768 } 16769 16769 … … 16783 16783 IEMOP_MNEMONIC("out DX,eAX"); 16784 16784 IEMOP_HLP_NO_LOCK_PREFIX(); 16785 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, p IemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);16785 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT ? 
2 : 4); 16786 16786 } 16787 16787 … … 16791 16791 { 16792 16792 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock"); 16793 p IemCpu->fPrefixes |= IEM_OP_PRF_LOCK;16793 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_LOCK; 16794 16794 16795 16795 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 16812 16812 { 16813 16813 /* This overrides any previous REPE prefix. */ 16814 p IemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;16814 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REPZ; 16815 16815 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne"); 16816 p IemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;16816 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REPNZ; 16817 16817 16818 16818 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 16825 16825 { 16826 16826 /* This overrides any previous REPNE prefix. */ 16827 p IemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;16827 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REPNZ; 16828 16828 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe"); 16829 p IemCpu->fPrefixes |= IEM_OP_PRF_REPZ;16829 pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REPZ; 16830 16830 16831 16831 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b); … … 16869 16869 IEM_MC_ARG(uint8_t *, pu8Dst, 0); 16870 16870 IEM_MC_ARG(uint32_t *, pEFlags, 1); 16871 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);16871 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 16872 16872 IEM_MC_REF_EFLAGS(pEFlags); 16873 16873 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags); … … 16884 16884 16885 16885 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 16886 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);16886 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 16887 16887 IEM_MC_FETCH_EFLAGS(EFlags); 16888 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))16888 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 16889 16889 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags); 16890 16890 else … … 16910 16910 /* Registers are handled by a common worker. */ 16911 16911 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 16912 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);16912 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 16913 16913 16914 16914 /* Memory we do here. 
*/ 16915 switch (p IemCpu->enmEffOpSize)16915 switch (pVCpu->iem.s.enmEffOpSize) 16916 16916 { 16917 16917 case IEMMODE_16BIT: … … 16922 16922 16923 16923 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 16924 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);16924 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 16925 16925 IEM_MC_FETCH_EFLAGS(EFlags); 16926 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))16926 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 16927 16927 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags); 16928 16928 else … … 16942 16942 16943 16943 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 16944 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);16944 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 16945 16945 IEM_MC_FETCH_EFLAGS(EFlags); 16946 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))16946 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 16947 16947 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags); 16948 16948 else … … 16962 16962 16963 16963 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 16964 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);16964 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 16965 16965 IEM_MC_FETCH_EFLAGS(EFlags); 16966 if (!(p IemCpu->fPrefixes & IEM_OP_PRF_LOCK))16966 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 16967 16967 IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags); 16968 16968 else … … 16996 16996 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/u8Imm, 1); 16997 16997 IEM_MC_ARG(uint32_t *, pEFlags, 2); 16998 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);16998 IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 16999 16999 IEM_MC_REF_EFLAGS(pEFlags); 17000 17000 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags); … … 17016 17016 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); 17017 17017 IEM_MC_ASSIGN(u8Src, u8Imm); 17018 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);17018 IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 17019 17019 IEM_MC_FETCH_EFLAGS(EFlags); 17020 17020 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags); … … 17039 17039 { 17040 17040 /* register access */ 17041 switch (p IemCpu->enmEffOpSize)17041 switch (pVCpu->iem.s.enmEffOpSize) 17042 17042 { 17043 17043 case IEMMODE_16BIT: … … 17048 17048 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/u16Imm, 1); 17049 17049 IEM_MC_ARG(uint32_t *, pEFlags, 2); 17050 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17050 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17051 17051 IEM_MC_REF_EFLAGS(pEFlags); 17052 17052 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags); … … 17063 17063 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/u32Imm, 1); 17064 17064 IEM_MC_ARG(uint32_t *, pEFlags, 2); 17065 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17065 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17066 17066 IEM_MC_REF_EFLAGS(pEFlags); 17067 17067 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags); … … 17079 17079 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/u64Imm, 1); 17080 17080 IEM_MC_ARG(uint32_t *, pEFlags, 2); 17081 
IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17081 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17082 17082 IEM_MC_REF_EFLAGS(pEFlags); 17083 17083 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags); … … 17093 17093 { 17094 17094 /* memory access. */ 17095 switch (p IemCpu->enmEffOpSize)17095 switch (pVCpu->iem.s.enmEffOpSize) 17096 17096 { 17097 17097 case IEMMODE_16BIT: … … 17106 17106 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 17107 17107 IEM_MC_ASSIGN(u16Src, u16Imm); 17108 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);17108 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 17109 17109 IEM_MC_FETCH_EFLAGS(EFlags); 17110 17110 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags); … … 17128 17128 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); 17129 17129 IEM_MC_ASSIGN(u32Src, u32Imm); 17130 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);17130 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 17131 17131 IEM_MC_FETCH_EFLAGS(EFlags); 17132 17132 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags); … … 17150 17150 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); 17151 17151 IEM_MC_ASSIGN(u64Src, u64Imm); 17152 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, p IemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);17152 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 17153 17153 IEM_MC_FETCH_EFLAGS(EFlags); 17154 17154 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags); … … 17182 17182 IEM_MC_LOCAL(int32_t, rc); 17183 17183 17184 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17184 IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17185 17185 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); 17186 17186 IEM_MC_REF_EFLAGS(pEFlags); … … 17207 17207 17208 17208 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 17209 IEM_MC_FETCH_MEM_U8(u8Value, p IemCpu->iEffSeg, GCPtrEffDst);17209 IEM_MC_FETCH_MEM_U8(u8Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 17210 17210 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); 17211 17211 IEM_MC_REF_EFLAGS(pEFlags); … … 17232 17232 { 17233 17233 /* register access */ 17234 switch (p IemCpu->enmEffOpSize)17234 switch (pVCpu->iem.s.enmEffOpSize) 17235 17235 { 17236 17236 case IEMMODE_16BIT: … … 17244 17244 IEM_MC_LOCAL(int32_t, rc); 17245 17245 17246 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17246 IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17247 17247 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); 17248 17248 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); … … 17269 17269 IEM_MC_LOCAL(int32_t, rc); 17270 17270 17271 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17271 IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17272 17272 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); 17273 17273 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); … … 17296 17296 IEM_MC_LOCAL(int32_t, rc); 17297 17297 17298 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17298 IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17299 17299 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); 17300 17300 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); … … 17317 17317 { 
17318 17318 /* memory access. */ 17319 switch (p IemCpu->enmEffOpSize)17319 switch (pVCpu->iem.s.enmEffOpSize) 17320 17320 { 17321 17321 case IEMMODE_16BIT: … … 17331 17331 17332 17332 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 17333 IEM_MC_FETCH_MEM_U16(u16Value, p IemCpu->iEffSeg, GCPtrEffDst);17333 IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 17334 17334 IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); 17335 17335 IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); … … 17358 17358 17359 17359 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 17360 IEM_MC_FETCH_MEM_U32(u32Value, p IemCpu->iEffSeg, GCPtrEffDst);17360 IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 17361 17361 IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); 17362 17362 IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); … … 17387 17387 17388 17388 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 17389 IEM_MC_FETCH_MEM_U64(u64Value, p IemCpu->iEffSeg, GCPtrEffDst);17389 IEM_MC_FETCH_MEM_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 17390 17390 IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); 17391 17391 IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); … … 17584 17584 { 17585 17585 /* The new RIP is taken from a register. */ 17586 switch (p IemCpu->enmEffOpSize)17586 switch (pVCpu->iem.s.enmEffOpSize) 17587 17587 { 17588 17588 case IEMMODE_16BIT: 17589 17589 IEM_MC_BEGIN(1, 0); 17590 17590 IEM_MC_ARG(uint16_t, u16Target, 0); 17591 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17591 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17592 17592 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target); 17593 17593 IEM_MC_END() … … 17597 17597 IEM_MC_BEGIN(1, 0); 17598 17598 IEM_MC_ARG(uint32_t, u32Target, 0); 17599 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17599 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17600 17600 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target); 17601 17601 IEM_MC_END() … … 17605 17605 IEM_MC_BEGIN(1, 0); 17606 17606 IEM_MC_ARG(uint64_t, u64Target, 0); 17607 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17607 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17608 17608 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target); 17609 17609 IEM_MC_END() … … 17616 17616 { 17617 17617 /* The new RIP is taken from a register. 
*/ 17618 switch (p IemCpu->enmEffOpSize)17618 switch (pVCpu->iem.s.enmEffOpSize) 17619 17619 { 17620 17620 case IEMMODE_16BIT: … … 17623 17623 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17624 17624 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17625 IEM_MC_FETCH_MEM_U16(u16Target, p IemCpu->iEffSeg, GCPtrEffSrc);17625 IEM_MC_FETCH_MEM_U16(u16Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17626 17626 IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target); 17627 17627 IEM_MC_END() … … 17633 17633 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17634 17634 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17635 IEM_MC_FETCH_MEM_U32(u32Target, p IemCpu->iEffSeg, GCPtrEffSrc);17635 IEM_MC_FETCH_MEM_U32(u32Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17636 17636 IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target); 17637 17637 IEM_MC_END() … … 17643 17643 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17644 17644 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17645 IEM_MC_FETCH_MEM_U64(u64Target, p IemCpu->iEffSeg, GCPtrEffSrc);17645 IEM_MC_FETCH_MEM_U64(u64Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17646 17646 IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target); 17647 17647 IEM_MC_END() … … 17662 17662 17663 17663 /* Far pointer loaded from memory. */ 17664 switch (p IemCpu->enmEffOpSize)17664 switch (pVCpu->iem.s.enmEffOpSize) 17665 17665 { 17666 17666 case IEMMODE_16BIT: … … 17672 17672 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17673 17673 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 17674 IEM_MC_FETCH_MEM_U16(offSeg, p IemCpu->iEffSeg, GCPtrEffSrc);17675 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc, 2);17674 IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17675 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 2); 17676 17676 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize); 17677 17677 IEM_MC_END(); … … 17682 17682 * and will apparently ignore REX.W, at least for the jmp far qword [rsp] 17683 17683 * and call far qword [rsp] encodings. */ 17684 if (!IEM_IS_GUEST_CPU_AMD(p IemCpu))17684 if (!IEM_IS_GUEST_CPU_AMD(pVCpu)) 17685 17685 { 17686 17686 IEM_MC_BEGIN(3, 1); … … 17691 17691 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17692 17692 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 17693 IEM_MC_FETCH_MEM_U64(offSeg, p IemCpu->iEffSeg, GCPtrEffSrc);17694 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc, 8);17693 IEM_MC_FETCH_MEM_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17694 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 8); 17695 17695 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize); 17696 17696 IEM_MC_END(); … … 17707 17707 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17708 17708 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 17709 IEM_MC_FETCH_MEM_U32(offSeg, p IemCpu->iEffSeg, GCPtrEffSrc);17710 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, p IemCpu->iEffSeg, GCPtrEffSrc, 4);17709 IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17710 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 4); 17711 17711 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize); 17712 17712 IEM_MC_END(); … … 17742 17742 { 17743 17743 /* The new RIP is taken from a register. 
*/ 17744 switch (p IemCpu->enmEffOpSize)17744 switch (pVCpu->iem.s.enmEffOpSize) 17745 17745 { 17746 17746 case IEMMODE_16BIT: 17747 17747 IEM_MC_BEGIN(0, 1); 17748 17748 IEM_MC_LOCAL(uint16_t, u16Target); 17749 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17749 IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17750 17750 IEM_MC_SET_RIP_U16(u16Target); 17751 17751 IEM_MC_END() … … 17755 17755 IEM_MC_BEGIN(0, 1); 17756 17756 IEM_MC_LOCAL(uint32_t, u32Target); 17757 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17757 IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17758 17758 IEM_MC_SET_RIP_U32(u32Target); 17759 17759 IEM_MC_END() … … 17763 17763 IEM_MC_BEGIN(0, 1); 17764 17764 IEM_MC_LOCAL(uint64_t, u64Target); 17765 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17765 IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17766 17766 IEM_MC_SET_RIP_U64(u64Target); 17767 17767 IEM_MC_END() … … 17774 17774 { 17775 17775 /* The new RIP is taken from a memory location. */ 17776 switch (p IemCpu->enmEffOpSize)17776 switch (pVCpu->iem.s.enmEffOpSize) 17777 17777 { 17778 17778 case IEMMODE_16BIT: … … 17781 17781 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17782 17782 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17783 IEM_MC_FETCH_MEM_U16(u16Target, p IemCpu->iEffSeg, GCPtrEffSrc);17783 IEM_MC_FETCH_MEM_U16(u16Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17784 17784 IEM_MC_SET_RIP_U16(u16Target); 17785 17785 IEM_MC_END() … … 17791 17791 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17792 17792 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17793 IEM_MC_FETCH_MEM_U32(u32Target, p IemCpu->iEffSeg, GCPtrEffSrc);17793 IEM_MC_FETCH_MEM_U32(u32Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17794 17794 IEM_MC_SET_RIP_U32(u32Target); 17795 17795 IEM_MC_END() … … 17801 17801 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17802 17802 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17803 IEM_MC_FETCH_MEM_U64(u64Target, p IemCpu->iEffSeg, GCPtrEffSrc);17803 IEM_MC_FETCH_MEM_U64(u64Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17804 17804 IEM_MC_SET_RIP_U64(u64Target); 17805 17805 IEM_MC_END() … … 17834 17834 /* Registers are handled by a common worker. */ 17835 17835 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 17836 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | p IemCpu->uRexB);17836 return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 17837 17837 17838 17838 /* Memory we do here. 
*/ 17839 17839 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 17840 switch (p IemCpu->enmEffOpSize)17840 switch (pVCpu->iem.s.enmEffOpSize) 17841 17841 { 17842 17842 case IEMMODE_16BIT: … … 17845 17845 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17846 17846 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17847 IEM_MC_FETCH_MEM_U16(u16Src, p IemCpu->iEffSeg, GCPtrEffSrc);17847 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17848 17848 IEM_MC_PUSH_U16(u16Src); 17849 17849 IEM_MC_ADVANCE_RIP(); … … 17856 17856 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17857 17857 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17858 IEM_MC_FETCH_MEM_U32(u32Src, p IemCpu->iEffSeg, GCPtrEffSrc);17858 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17859 17859 IEM_MC_PUSH_U32(u32Src); 17860 17860 IEM_MC_ADVANCE_RIP(); … … 17867 17867 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 17868 17868 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 17869 IEM_MC_FETCH_MEM_U64(u64Src, p IemCpu->iEffSeg, GCPtrEffSrc);17869 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 17870 17870 IEM_MC_PUSH_U64(u64Src); 17871 17871 IEM_MC_ADVANCE_RIP(); -
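The decoder changes above are, for the most part, a mechanical rename: state that was previously reached through a dedicated IEMCPU pointer (pIemCpu->iEffSeg, pIemCpu->enmEffOpSize, pIemCpu->fPrefixes, ...) is now reached through the shared VMCPU pointer, with the per-CPU IEM data embedded as iem.s. As a reading aid only, the following stand-alone sketch uses simplified stand-in types (not the real VBox headers) and an invented helper, GetEffSeg, to show the shape of that access-path change:

    /* Simplified stand-in types for illustration only -- the real IEMCPU and
     * VMCPU structures have many more members. */
    typedef struct IEMCPU
    {
        unsigned char iEffSeg;      /* effective segment register index */
        unsigned      offOpcode;    /* offset into the fetched opcode bytes */
    } IEMCPU;

    typedef struct VMCPU
    {
        struct { IEMCPU s; } iem;   /* per-CPU IEM state embedded in the VMCPU */
    } VMCPU;

    /* Invented helper: before this changeset such code took an IEMCPU pointer
     * and read pIemCpu->iEffSeg; afterwards the same field is reached through
     * the VMCPU pointer. */
    static unsigned char GetEffSeg(VMCPU *pVCpu)
    {
        return pVCpu->iem.s.iEffSeg;
    }

The IEM_MC_* microcode macros themselves are untouched in these hunks; only the expressions passed to them change, e.g. from pIemCpu->iEffSeg to pVCpu->iem.s.iEffSeg.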
trunk/src/VBox/VMM/include/IEMInternal.h
r62010 r62015 264 264 mov r8d, ACCESS_FLAGS 265 265 mov rdx, [VA] 266 mov rcx, [p IemCpu]266 mov rcx, [pVCpu] 267 267 call iemTlbTypeMiss 268 268 .Done: … … 648 648 /** Gets the current IEMTARGETCPU value. 649 649 * @returns IEMTARGETCPU value. 650 * @param a_p IemCpuThe IEM per CPU instance data.650 * @param a_pVCpu The IEM per CPU instance data. 651 651 */ 652 652 #if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC 653 # define IEM_GET_TARGET_CPU(a_p IemCpu) (IEM_CFG_TARGET_CPU)653 # define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU) 654 654 #else 655 # define IEM_GET_TARGET_CPU(a_p IemCpu) ((a_pIemCpu)->uTargetCpu)655 # define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu) 656 656 #endif 657 657 … … 785 785 * of an if statement. */ 786 786 #ifdef IEM_VERIFICATION_MODE_FULL 787 # define IEM_VERIFICATION_ENABLED(a_p IemCpu) (!(a_pIemCpu)->fNoRem)787 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem) 788 788 #elif defined(IEM_VERIFICATION_MODE_MINIMAL) 789 # define IEM_VERIFICATION_ENABLED(a_p IemCpu)(true)789 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (true) 790 790 #else 791 # define IEM_VERIFICATION_ENABLED(a_p IemCpu)(false)791 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (false) 792 792 #endif 793 793 … … 799 799 * of an if statement. */ 800 800 #ifdef IEM_VERIFICATION_MODE_FULL 801 # define IEM_FULL_VERIFICATION_ENABLED(a_p IemCpu) (!(a_pIemCpu)->fNoRem)801 # define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem) 802 802 #else 803 # define IEM_FULL_VERIFICATION_ENABLED(a_p IemCpu) (false)803 # define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (false) 804 804 #endif 805 805 … … 812 812 #ifdef IEM_VERIFICATION_MODE_FULL 813 813 # ifdef IEM_VERIFICATION_MODE_FULL_HM 814 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_p IemCpu) (!(a_pIemCpu)->fNoRem && !HMIsEnabled(IEMCPU_TO_VM(a_pIemCpu)))814 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem && !HMIsEnabled((a_pVCpu)->CTX_SUFF(pVM))) 815 815 # else 816 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_p IemCpu) (!(a_pIemCpu)->fNoRem)816 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem) 817 817 # endif 818 818 #else 819 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_p IemCpu)(false)819 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (false) 820 820 #endif 821 821 … … 836 836 */ 837 837 #ifdef IEM_VERIFICATION_MODE_FULL 838 # define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { p IemCpu->fUndefinedEFlags |= (a_fEfl); } while (0)838 # define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { pVCpu->iem.s.fUndefinedEFlags |= (a_fEfl); } while (0) 839 839 #else 840 840 # define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0) … … 1544 1544 */ 1545 1545 # define IEM_CIMPL_DECL_TYPE_0(a_Name) \ 1546 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr))1546 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr)) 1547 1547 /** 1548 1548 * For defining a C instruction implementation function taking no extra … … 1552 1552 */ 1553 1553 # define IEM_CIMPL_DEF_0(a_Name) \ 1554 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr))1554 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr)) 1555 1555 /** 1556 1556 * For calling a C instruction implementation function taking no extra … … 1562 1562 * @param a_fn The name of the function. 
1563 1563 */ 1564 # define IEM_CIMPL_CALL_0(a_fn) a_fn(p IemCpu, cbInstr)1564 # define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr) 1565 1565 1566 1566 /** … … 1573 1573 */ 1574 1574 # define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \ 1575 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0))1575 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0)) 1576 1576 /** 1577 1577 * For defining a C instruction implementation function taking one extra … … 1583 1583 */ 1584 1584 # define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \ 1585 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0))1585 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0)) 1586 1586 /** 1587 1587 * For calling a C instruction implementation function taking one extra … … 1594 1594 * @param a0 The name of the 1st argument. 1595 1595 */ 1596 # define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(p IemCpu, cbInstr, (a0))1596 # define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0)) 1597 1597 1598 1598 /** … … 1607 1607 */ 1608 1608 # define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \ 1609 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))1609 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1)) 1610 1610 /** 1611 1611 * For defining a C instruction implementation function taking two extra … … 1619 1619 */ 1620 1620 # define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \ 1621 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))1621 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1)) 1622 1622 /** 1623 1623 * For calling a C instruction implementation function taking two extra … … 1631 1631 * @param a1 The name of the 2nd argument. 1632 1632 */ 1633 # define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(p IemCpu, cbInstr, (a0), (a1))1633 # define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1)) 1634 1634 1635 1635 /** … … 1646 1646 */ 1647 1647 # define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \ 1648 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))1648 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2)) 1649 1649 /** 1650 1650 * For defining a C instruction implementation function taking three extra … … 1660 1660 */ 1661 1661 # define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \ 1662 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))1662 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2)) 1663 1663 /** 1664 1664 * For calling a C instruction implementation function taking three extra … … 1673 1673 * @param a2 The name of the 3rd argument. 
1674 1674 */ 1675 # define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(p IemCpu, cbInstr, (a0), (a1), (a2))1675 # define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2)) 1676 1676 1677 1677 … … 1691 1691 */ 1692 1692 # define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \ 1693 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))1693 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3)) 1694 1694 /** 1695 1695 * For defining a C instruction implementation function taking four extra … … 1707 1707 */ 1708 1708 # define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \ 1709 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \1709 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \ 1710 1710 a_Type2 a_Arg2, a_Type3 a_Arg3)) 1711 1711 /** … … 1722 1722 * @param a3 The name of the 4th argument. 1723 1723 */ 1724 # define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(p IemCpu, cbInstr, (a0), (a1), (a2), (a3))1724 # define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3)) 1725 1725 1726 1726 … … 1742 1742 */ 1743 1743 # define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \ 1744 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, \1744 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \ 1745 1745 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \ 1746 1746 a_Type3 a_Arg3, a_Type4 a_Arg4)) … … 1762 1762 */ 1763 1763 # define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \ 1764 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (P IEMCPU pIemCpu, uint8_t cbInstr, \1764 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \ 1765 1765 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \ 1766 1766 a_Type3 a_Arg3, a_Type4 a_Arg4)) … … 1779 1779 * @param a4 The name of the 5th argument. 1780 1780 */ 1781 # define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(p IemCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))1781 # define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4)) 1782 1782 1783 1783 /** @} */ -
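As a reading aid only (not part of the changeset): with the updated IEM_CIMPL_* macros, every C instruction implementation now receives the VMCPU pointer plus the instruction length up front. The sketch below shows roughly what IEM_CIMPL_DEF_1 and IEM_CIMPL_CALL_1 amount to after this change; the function names, the uValue argument and the stand-in typedefs are invented so the example compiles on its own and are not the real VBox definitions:

    #include <stdint.h>

    /* Stand-in types so this sketch is self-contained -- not the real VBox ones. */
    typedef int          VBOXSTRICTRC;   /* the real type wraps a VBox status code */
    typedef struct VMCPU VMCPU;          /* opaque here; never dereferenced */
    typedef VMCPU       *PVMCPU;
    #define VINF_SUCCESS 0

    /* Roughly what IEM_CIMPL_DEF_1(iemCImpl_example, uint16_t, uValue) declares
     * after this change (invented name and argument): */
    static VBOXSTRICTRC iemCImpl_example(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
    {
        (void)pVCpu; (void)cbInstr; (void)uValue;
        return VINF_SUCCESS;
    }

    /* Roughly what IEM_CIMPL_CALL_1(iemCImpl_example, 42) expands to at a call
     * site where pVCpu and cbInstr are already in scope: */
    static VBOXSTRICTRC iemCImpl_exampleCaller(PVMCPU pVCpu, uint8_t cbInstr)
    {
        return iemCImpl_example(pVCpu, cbInstr, 42);
    }

The same pattern applies to the 0- to 5-argument variants: the old pIemCpu parameter and call argument are both replaced by pVCpu, while cbInstr is unchanged.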
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r61886 r62015 28 28 #include <VBox/log.h> 29 29 #include "../include/IEMInternal.h" 30 #include <VBox/vmm/vm.h> 30 31 31 32 … … 73 74 * @{ */ 74 75 75 typedef VBOXSTRICTRC (* PFNIEMOP)(P IEMCPU pIemCpu);76 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu); 76 77 #define FNIEMOP_DEF(a_Name) \ 77 static VBOXSTRICTRC a_Name(P IEMCPU pIemCpu) RT_NO_THROW_DEF78 static VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF 78 79 #define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \ 79 static VBOXSTRICTRC a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW_DEF80 static VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF 80 81 #define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \ 81 static VBOXSTRICTRC a_Name(P IEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF82 83 typedef VBOXSTRICTRC (* PFNIEMOPRM)(P IEMCPU pIemCpu, uint8_t bRm);82 static VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF 83 84 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm); 84 85 #define FNIEMOPRM_DEF(a_Name) \ 85 static VBOXSTRICTRC a_Name(P IEMCPU pIemCpu, uint8_t bRm) RT_NO_THROW_DEF86 static VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint8_t bRm) RT_NO_THROW_DEF 86 87 87 88 #define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: return VERR_IPE_NOT_REACHED_DEFAULT_CASE … … 144 145 145 146 146 #define FNIEMOP_CALL(a_pfn) (a_pfn)(p IemCpu)147 #define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(p IemCpu, a0)148 #define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(p IemCpu, a0, a1)149 150 #define IEM_IS_REAL_OR_V86_MODE(a_p IemCpu)(g_fRandom)151 #define IEM_IS_LONG_MODE(a_p IemCpu)(g_fRandom)152 #define IEM_IS_REAL_MODE(a_p IemCpu)(g_fRandom)153 #define IEM_IS_GUEST_CPU_AMD(a_p IemCpu)(g_fRandom)154 #define IEM_IS_GUEST_CPU_INTEL(a_p IemCpu)(g_fRandom)155 #define IEM_GET_GUEST_CPU_FEATURES(a_p IemCpu)((PCCPUMFEATURES)(uintptr_t)42)156 #define IEM_GET_HOST_CPU_FEATURES(a_p IemCpu)((PCCPUMFEATURES)(uintptr_t)88)157 158 #define iemRecalEffOpSize(a_p IemCpu)do { } while (0)147 #define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu) 148 #define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0) 149 #define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1) 150 151 #define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (g_fRandom) 152 #define IEM_IS_LONG_MODE(a_pVCpu) (g_fRandom) 153 #define IEM_IS_REAL_MODE(a_pVCpu) (g_fRandom) 154 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu) (g_fRandom) 155 #define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) (g_fRandom) 156 #define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) ((PCCPUMFEATURES)(uintptr_t)42) 157 #define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) ((PCCPUMFEATURES)(uintptr_t)88) 158 159 #define iemRecalEffOpSize(a_pVCpu) do { } while (0) 159 160 160 161 IEMOPBINSIZES g_iemAImpl_add;