Changeset 72496 in vbox

- Timestamp: Jun 10, 2018 5:21:36 PM
- svn:sync-xref-src-repo-rev: 122981
- Location: trunk/src/VBox/VMM
- Files: 6 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are prefixed with +
- Removed lines are prefixed with -
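Every hunk below applies the same mechanical transformation: internal IEM helpers stop carrying a separate PCPUMCTX pCtx parameter (or fetching one via IEM_GET_CTX) and instead dereference the guest context through pVCpu->cpum.GstCtx. A minimal sketch of the before/after shape, using simplified stand-in types rather than the real VMM headers:

    /* Sketch only: stand-in types, not the real VirtualBox definitions. */
    typedef struct CPUMCTX { unsigned long long rip; } CPUMCTX;
    typedef struct VMCPU   { struct { CPUMCTX GstCtx; } cpum; } VMCPU;

    /* Before: helpers took the guest context as an extra parameter. */
    static void helperOld(VMCPU *pVCpu, CPUMCTX *pCtx) { pCtx->rip += 1; }

    /* After: the context is reached through the per-CPU structure itself,
       so the redundant parameter and the IEM_GET_CTX lookups go away.   */
    static void helperNew(VMCPU *pVCpu) { pVCpu->cpum.GstCtx.rip += 1; }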
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r72494 → r72496

…
      */
 # define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
-    do \
-    { \
-        return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
-    } while (0)
+    do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
 
 /**
…
 
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
-                                     uint64_t uExitInfo2);
-IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
-                                                   uint32_t uErr, uint64_t uCr2);
+IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
+IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
 #endif
…
  *
  * @returns CPU mode.
- * @param   pCtx            The register context for the CPU.
- */
-DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
-{
-    if (CPUMIsGuestIn64BitCodeEx(pCtx))
+ * @param   pVCpu           The cross context virtual CPU structure of the
+ *                          calling thread.
+ */
+DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
+{
+    if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
         return IEMMODE_64BIT;
-    if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
+    if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
         return IEMMODE_32BIT;
     return IEMMODE_16BIT;
…
 DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
 {
-    PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
-
-    IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
 
 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
 #endif
…
 #endif
     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);
-    pVCpu->iem.s.enmCpuMode         = iemCalcCpuMode(pCtx);
+    pVCpu->iem.s.enmCpuMode         = iemCalcCpuMode(pVCpu);
 #ifdef VBOX_STRICT
     pVCpu->iem.s.enmDefAddrMode     = (IEMMODE)0xfe;
…
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     pVCpu->iem.s.fInPatchCode       = pVCpu->iem.s.uCpl == 0
-                                   && pCtx->cs.u64Base == 0
-                                   && pCtx->cs.u32Limit == UINT32_MAX
-                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
+                                   && pVCpu->cpum.GstCtx.cs.u64Base == 0
+                                   && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
+                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
     if (!pVCpu->iem.s.fInPatchCode)
         CPUMRawLeave(pVCpu, VINF_SUCCESS);
…
 IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
 {
-    PCPUMCTX const  pCtx    = IEM_GET_CTX(pVCpu);
-    IEMMODE const   enmMode = iemCalcCpuMode(pCtx);
+    IEMMODE const   enmMode = iemCalcCpuMode(pVCpu);
     uint8_t const   uCpl    = CPUMGetGuestCPL(pVCpu);
…
 DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
 {
-    PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
-
-    IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
 
 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
 #endif
…
 #endif
     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);
-    IEMMODE enmMode = iemCalcCpuMode(pCtx);
+    IEMMODE enmMode = iemCalcCpuMode(pVCpu);
     pVCpu->iem.s.enmCpuMode         = enmMode;
     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
…
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     pVCpu->iem.s.fInPatchCode       = pVCpu->iem.s.uCpl == 0
-                                   && pCtx->cs.u64Base == 0
-                                   && pCtx->cs.u32Limit == UINT32_MAX
-                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
+                                   && pVCpu->cpum.GstCtx.cs.u64Base == 0
+                                   && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
+                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
     if (!pVCpu->iem.s.fInPatchCode)
         CPUMRawLeave(pVCpu, VINF_SUCCESS);
…
     {
         case IEMMODE_64BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
             break;
         case IEMMODE_32BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
             break;
         case IEMMODE_16BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
             break;
     }
…
 DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
 {
-    PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
-
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
 
 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
 #endif
 
     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);   /** @todo this should be updated during execution! */
-    IEMMODE enmMode = iemCalcCpuMode(pCtx);
+    IEMMODE enmMode = iemCalcCpuMode(pVCpu);
     pVCpu->iem.s.enmCpuMode         = enmMode;                  /** @todo this should be updated during execution! */
     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
…
     if (pVCpu->iem.s.pbInstrBuf)
     {
-        uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
+        uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
                      - pVCpu->iem.s.uInstrBufPc;
         if (off < pVCpu->iem.s.cbInstrBufTotal)
…
     {
         pVCpu->iem.s.fInPatchCode   = pVCpu->iem.s.uCpl == 0
-                                   && pCtx->cs.u64Base == 0
-                                   && pCtx->cs.u32Limit == UINT32_MAX
-                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
+                                   && pVCpu->cpum.GstCtx.cs.u64Base == 0
+                                   && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
+                                   && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
         if (!pVCpu->iem.s.fInPatchCode)
             CPUMRawLeave(pVCpu, VINF_SUCCESS);
…
     {
         case IEMMODE_64BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
             break;
         case IEMMODE_32BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
             break;
         case IEMMODE_16BIT:
-            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
             break;
     }
…
      * First translate CS:rIP to a physical address.
      */
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     uint32_t    cbToTryRead;
     RTGCPTR     GCPtrPC;
…
     {
         cbToTryRead = PAGE_SIZE;
-        GCPtrPC     = pCtx->rip;
+        GCPtrPC     = pVCpu->cpum.GstCtx.rip;
         if (IEM_IS_CANONICAL(GCPtrPC))
             cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
…
     else
     {
-        uint32_t GCPtrPC32 = pCtx->eip;
-        AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
-        if (GCPtrPC32 <= pCtx->cs.u32Limit)
-            cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
+        uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
+        AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+        if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
+            cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
         else
             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
…
         else /* overflowed */
         {
-            Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
+            Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
             cbToTryRead = UINT32_MAX;
         }
-        GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
+        GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
         Assert(GCPtrPC <= UINT32_MAX);
     }
…
         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
+    if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
     else
     {
…
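An aside on the IEM_IS_CANONICAL test guarding the 64-bit paths above: a 64-bit fetch address is canonical when bits 63:48 are a sign-extension of bit 47. A sketch of the usual check, assuming 48-bit implementations (the real macro in the VBox headers is authoritative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Canonical iff sign-extending the low 48 bits reproduces the value. */
    static bool isCanonical(uint64_t uAddr)
    {
        return (uint64_t)(((int64_t)uAddr << 16) >> 16) == uAddr;
    }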
      * end up in trouble and we need to do that first before faulting.
      */
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     RTGCPTR  GCPtrFirst;
     uint32_t cbMaxRead;
     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
     {
-        GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
+        GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
         if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
         { /* likely */ }
…
     else
     {
-        GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
+        GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
         Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
-        if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
+        if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
         { /* likely */ }
         else
             iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
+        cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
         if (cbMaxRead != 0)
         { /* likely */ }
…
         {
             /* Overflowed because address is 0 and limit is max. */
-            Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
+            Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
             cbMaxRead = X86_PAGE_SIZE;
         }
-        GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
+        GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
         uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
         if (cbMaxRead2 < cbMaxRead)
…
         pVCpu->iem.s.CodeTlb.cTlbMisses++;
 # ifdef VBOX_WITH_RAW_MODE_NOT_R0
-        if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
+        if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
         {
             pTlbe->uTag = uTag;
…
             iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
         }
-        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
+        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
         {
             Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
…
      * First translate CS:rIP to a physical address.
      */
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     uint8_t  cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
     uint32_t cbToTryRead;
…
     {
         cbToTryRead = PAGE_SIZE;
-        GCPtrNext   = pCtx->rip + pVCpu->iem.s.cbOpcode;
+        GCPtrNext   = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
         if (!IEM_IS_CANONICAL(GCPtrNext))
             return iemRaiseGeneralProtectionFault0(pVCpu);
…
     else
     {
-        uint32_t GCPtrNext32 = pCtx->eip;
+        uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
         Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
         GCPtrNext32 += pVCpu->iem.s.cbOpcode;
-        if (GCPtrNext32 > pCtx->cs.u32Limit)
+        if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
+        cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
         if (!cbToTryRead) /* overflowed */
         {
-            Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
+            Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
             cbToTryRead = UINT32_MAX;
             /** @todo check out wrapping around the code segment. */
…
         if (cbToTryRead < cbMin - cbLeft)
             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-        GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
+        GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
     }
 
…
         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     }
-    if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
+    if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
     {
         Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
…
  * @param   pVCpu           The cross context virtual CPU structure of the
  *                          calling thread.
- * @param   pCtx            The CPU context.
  * @param   NewSS           The new SS selctor.
  * @param   uCpl            The CPL to load the stack for.
  * @param   pDesc           Where to return the descriptor.
  */
-IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
-{
-    NOREF(pCtx);
-
+IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
+{
     /* Null selectors are not allowed (we're not called for dispatching
        interrupts with SS=0 in long mode). */
…
  *
  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
- * @param   a_pCtx          The CPU context.
  */
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx)   ( CPUMRawGetEFlags(a_pVCpu) )
+# define IEMMISC_GET_EFL(a_pVCpu)           ( CPUMRawGetEFlags(a_pVCpu) )
 #else
-# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx)   ( (a_pCtx)->eflags.u )
+# define IEMMISC_GET_EFL(a_pVCpu)           ( (a_pVCpu)->cpum.GstCtx.eflags.u )
 #endif
 
…
  *
  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
- * @param   a_pCtx          The CPU context.
  * @param   a_fEfl          The new EFLAGS.
  */
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl)   CPUMRawSetEFlags((a_pVCpu), a_fEfl)
+# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)           CPUMRawSetEFlags((a_pVCpu), a_fEfl)
 #else
-# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl)   do { (a_pCtx)->eflags.u = (a_fEfl); } while (0)
+# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)           do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
 #endif
…
  * @returns VBox strict status code.
  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
- * @param   pCtx            The CPU context.
  * @param   uCpl            The CPL to load the stack for.
  * @param   pSelSS          Where to return the new stack segment.
  * @param   puEsp           Where to return the new stack pointer.
  */
-IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
-                                                       PRTSEL pSelSS, uint32_t *puEsp)
+IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
 {
     VBOXSTRICTRC rcStrict;
     Assert(uCpl < 4);
 
-    IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
-    switch (pCtx->tr.Attr.n.u4Type)
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
+    switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
     {
         /*
…
         {
             uint32_t off = uCpl * 4 + 2;
-            if (off + 4 <= pCtx->tr.u32Limit)
+            if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
             {
                 /** @todo check actual access pattern here. */
                 uint32_t u32Tmp = 0; /* gcc maybe... */
-                rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
+                rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
                 if (rcStrict == VINF_SUCCESS)
                 {
…
             else
             {
-                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
+                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
                 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
             }
…
         {
             uint32_t off = uCpl * 8 + 4;
-            if (off + 7 <= pCtx->tr.u32Limit)
+            if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
             {
                 /** @todo check actual access pattern here. */
                 uint64_t u64Tmp;
-                rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
+                rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
                 if (rcStrict == VINF_SUCCESS)
                 {
…
             else
             {
-                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
+                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
                 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
             }
…
  * @returns VBox strict status code.
  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
- * @param   pCtx            The CPU context.
  * @param   uCpl            The CPL to load the stack for.
  * @param   uIst            The interrupt stack table index, 0 if to use uCpl.
  * @param   puRsp           Where to return the new stack pointer.
  */
-IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
+IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
 {
     Assert(uCpl < 4);
…
     *puRsp = 0; /* make gcc happy */
 
-    IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
-    AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
+    AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
 
     uint32_t off;
…
     else
         off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
-    if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
-    {
-        Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
+    if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
+    {
+        Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
         return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
     }
 
-    return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
+    return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
 }
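The two stack-loading helpers above pull the privileged stack pointer out of the task-state segment, and the offsets they compute follow the architectural TSS layouts: SP/SS pairs from offset 2 in a 16-bit TSS, ESP/SS pairs from offset 4 in a 32-bit TSS, and RSP0..RSP2 from offset 4 in the 64-bit TSS (which is what RT_OFFSETOF(X86TSS64, rsp0) resolves to). A small sketch of the arithmetic:

    #include <stdint.h>

    static uint32_t offTss16(uint8_t uCpl) { return uCpl * 4u + 2u; } /* sp0 at 2  */
    static uint32_t offTss32(uint8_t uCpl) { return uCpl * 8u + 4u; } /* esp0 at 4 */
    static uint32_t offTss64(uint8_t uCpl) { return uCpl * 8u + 4u; } /* rsp0 at 4 */
    /* e.g. the ring-0 stack in a 32-bit TSS lives at TR.base + 4 (esp0),
       and ring-1 at TR.base + 12 -- matching uCpl * 8 + 4 above. */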
3536 */ 3537 DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector) 3560 3538 { 3561 3539 switch (u8Vector) 3562 3540 { 3563 3541 case X86_XCPT_DB: 3564 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_DR7);3565 p Ctx->dr[7] &= ~X86_DR7_GD;3542 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7); 3543 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD; 3566 3544 break; 3567 3545 /** @todo Read the AMD and Intel exception reference... */ … … 3575 3553 * @returns VBox strict status code. 3576 3554 * @param pVCpu The cross context virtual CPU structure of the calling thread. 3577 * @param pCtx The CPU context.3578 3555 * @param cbInstr The number of bytes to offset rIP by in the return 3579 3556 * address. … … 3585 3562 IEM_STATIC VBOXSTRICTRC 3586 3563 iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu, 3587 PCPUMCTX pCtx,3588 3564 uint8_t cbInstr, 3589 3565 uint8_t u8Vector, … … 3593 3569 { 3594 3570 NOREF(uErr); NOREF(uCr2); 3595 IEM_CTX_ASSERT(p Ctx, IEM_CPUMCTX_EXTRN_XCPT_MASK);3571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 3596 3572 3597 3573 /* 3598 3574 * Read the IDT entry. 3599 3575 */ 3600 if (p Ctx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)3601 { 3602 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, p Ctx->idtr.cbIdt));3576 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3) 3577 { 3578 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt)); 3603 3579 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 3604 3580 } 3605 3581 RTFAR16 Idte; 3606 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, p Ctx->idtr.pIdt + UINT32_C(4) * u8Vector);3582 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector); 3607 3583 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 3608 3584 { … … 3620 3596 return rcStrict; 3621 3597 3622 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu , pCtx);3598 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu); 3623 3599 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC 3624 3600 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186); … … 3627 3603 #endif 3628 3604 pu16Frame[2] = (uint16_t)fEfl; 3629 pu16Frame[1] = (uint16_t)p Ctx->cs.Sel;3630 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? p Ctx->ip + cbInstr : pCtx->ip;3605 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel; 3606 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip; 3631 3607 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp); 3632 3608 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) … … 3637 3613 * adjustments. 3638 3614 */ 3639 p Ctx->cs.Sel = Idte.sel;3640 p Ctx->cs.ValidSel = Idte.sel;3641 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;3642 p Ctx->cs.u64Base = (uint32_t)Idte.sel << 4;3615 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel; 3616 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel; 3617 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 3618 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4; 3643 3619 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? 
*/ 3644 p Ctx->rip = Idte.off;3620 pVCpu->cpum.GstCtx.rip = Idte.off; 3645 3621 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC); 3646 IEMMISC_SET_EFL(pVCpu, pCtx,fEfl);3622 IEMMISC_SET_EFL(pVCpu, fEfl); 3647 3623 3648 3624 /** @todo do we actually do this in real mode? */ 3649 3625 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 3650 iemRaiseXcptAdjustState(p Ctx, u8Vector);3626 iemRaiseXcptAdjustState(pVCpu, u8Vector); 3651 3627 3652 3628 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS; … … 3842 3818 * @returns VBox strict status code. 3843 3819 * @param pVCpu The cross context virtual CPU structure of the calling thread. 3844 * @param pCtx The CPU context.3845 3820 * @param enmTaskSwitch What caused this task switch. 3846 3821 * @param uNextEip The EIP effective after the task switch. … … 3853 3828 IEM_STATIC VBOXSTRICTRC 3854 3829 iemTaskSwitch(PVMCPU pVCpu, 3855 PCPUMCTX pCtx,3856 3830 IEMTASKSWITCH enmTaskSwitch, 3857 3831 uint32_t uNextEip, … … 3864 3838 Assert(!IEM_IS_REAL_MODE(pVCpu)); 3865 3839 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 3866 IEM_CTX_ASSERT(p Ctx, IEM_CPUMCTX_EXTRN_XCPT_MASK);3840 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 3867 3841 3868 3842 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type; … … 3876 3850 3877 3851 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS, 3878 fIsNewTSS386, p Ctx->eip, uNextEip));3852 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip)); 3879 3853 3880 3854 /* Update CR2 in case it's a page-fault. */ … … 3882 3856 * @bugref{5653#c49}. */ 3883 3857 if (fFlags & IEM_XCPT_FLAGS_CR2) 3884 p Ctx->cr2 = uCr2;3858 pVCpu->cpum.GstCtx.cr2 = uCr2; 3885 3859 3886 3860 /* … … 3913 3887 if (fFlags & IEM_XCPT_FLAGS_ERR) 3914 3888 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE; 3915 if (p Ctx->eflags.Bits.u1RF)3889 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF) 3916 3890 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF; 3917 3891 … … 3930 3904 * end up with smaller than "legal" TSS limits. 3931 3905 */ 3932 uint32_t const uCurTSSLimit = p Ctx->tr.u32Limit;3906 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit; 3933 3907 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29; 3934 3908 if (uCurTSSLimit < uCurTSSLimitMin) … … 3961 3935 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET. 3962 3936 */ 3963 uint32_t u32EFlags = p Ctx->eflags.u32;3937 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32; 3964 3938 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP 3965 3939 || enmTaskSwitch == IEMTASKSWITCH_IRET) … … 3967 3941 PX86DESC pDescCurTSS; 3968 3942 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX, 3969 p Ctx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);3943 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW); 3970 3944 if (rcStrict != VINF_SUCCESS) 3971 3945 { 3972 3946 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n", 3973 enmTaskSwitch, p Ctx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3947 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict))); 3974 3948 return rcStrict; 3975 3949 } … … 3980 3954 { 3981 3955 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. 
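The real-mode dispatch above is compact: the IVT entry for vector N is the 4-byte offset:segment pair at IDTR.base + N*4, and the frame pushed is IP, CS and FLAGS, 16 bits each (pu16Frame[0..2]). A sketch of the entry lookup; RealModeIvtEntry and the helper name are illustrative, not VBox types:

    #include <stdint.h>

    typedef struct RealModeIvtEntry { uint16_t off; uint16_t sel; } RealModeIvtEntry;

    /* Linear address of the IVT entry for a vector, matching the
       pIdt + UINT32_C(4) * u8Vector fetch in the hunk above. */
    static uint64_t ivtEntryAddr(uint64_t uIdtBase, uint8_t u8Vector)
    {
        return uIdtBase + 4u * u8Vector;
    }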
…
  * @returns VBox strict status code.
  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
- * @param   pCtx            The CPU context.
  * @param   enmTaskSwitch   What caused this task switch.
  * @param   uNextEip        The EIP effective after the task switch.
…
 IEM_STATIC VBOXSTRICTRC
 iemTaskSwitch(PVMCPU          pVCpu,
-              PCPUMCTX        pCtx,
               IEMTASKSWITCH   enmTaskSwitch,
               uint32_t        uNextEip,
…
     Assert(!IEM_IS_REAL_MODE(pVCpu));
     Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
-    IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
 
     uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
…
 
     Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
-         fIsNewTSS386, pCtx->eip, uNextEip));
+         fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
 
     /* Update CR2 in case it's a page-fault. */
…
      * @bugref{5653#c49}. */
     if (fFlags & IEM_XCPT_FLAGS_CR2)
-        pCtx->cr2 = uCr2;
+        pVCpu->cpum.GstCtx.cr2 = uCr2;
 
     /*
…
         if (fFlags & IEM_XCPT_FLAGS_ERR)
             uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
-        if (pCtx->eflags.Bits.u1RF)
+        if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
             uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
 
…
      * end up with smaller than "legal" TSS limits.
      */
-    uint32_t const uCurTSSLimit    = pCtx->tr.u32Limit;
+    uint32_t const uCurTSSLimit    = pVCpu->cpum.GstCtx.tr.u32Limit;
     uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
     if (uCurTSSLimit < uCurTSSLimitMin)
…
      * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
      */
-    uint32_t u32EFlags = pCtx->eflags.u32;
+    uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
     if (   enmTaskSwitch == IEMTASKSWITCH_JUMP
         || enmTaskSwitch == IEMTASKSWITCH_IRET)
     {
         PX86DESC pDescCurTSS;
         rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
-                             pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
+                             pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
         if (rcStrict != VINF_SUCCESS)
         {
             Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
-                 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
+                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }
…
         {
             Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
-                 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
+                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }
…
      * Save the CPU state into the current TSS.
      */
-    RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
+    RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
     if (GCPtrNewTSS == GCPtrCurTSS)
     {
         Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
         Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
-             pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
+             pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
     }
     if (fIsNewTSS386)
…
         pCurTSS32->eip    = uNextEip;
         pCurTSS32->eflags = u32EFlags;
-        pCurTSS32->eax    = pCtx->eax;
-        pCurTSS32->ecx    = pCtx->ecx;
-        pCurTSS32->edx    = pCtx->edx;
-        pCurTSS32->ebx    = pCtx->ebx;
-        pCurTSS32->esp    = pCtx->esp;
-        pCurTSS32->ebp    = pCtx->ebp;
-        pCurTSS32->esi    = pCtx->esi;
-        pCurTSS32->edi    = pCtx->edi;
-        pCurTSS32->es     = pCtx->es.Sel;
-        pCurTSS32->cs     = pCtx->cs.Sel;
-        pCurTSS32->ss     = pCtx->ss.Sel;
-        pCurTSS32->ds     = pCtx->ds.Sel;
-        pCurTSS32->fs     = pCtx->fs.Sel;
-        pCurTSS32->gs     = pCtx->gs.Sel;
+        pCurTSS32->eax    = pVCpu->cpum.GstCtx.eax;
+        pCurTSS32->ecx    = pVCpu->cpum.GstCtx.ecx;
+        pCurTSS32->edx    = pVCpu->cpum.GstCtx.edx;
+        pCurTSS32->ebx    = pVCpu->cpum.GstCtx.ebx;
+        pCurTSS32->esp    = pVCpu->cpum.GstCtx.esp;
+        pCurTSS32->ebp    = pVCpu->cpum.GstCtx.ebp;
+        pCurTSS32->esi    = pVCpu->cpum.GstCtx.esi;
+        pCurTSS32->edi    = pVCpu->cpum.GstCtx.edi;
+        pCurTSS32->es     = pVCpu->cpum.GstCtx.es.Sel;
+        pCurTSS32->cs     = pVCpu->cpum.GstCtx.cs.Sel;
+        pCurTSS32->ss     = pVCpu->cpum.GstCtx.ss.Sel;
+        pCurTSS32->ds     = pVCpu->cpum.GstCtx.ds.Sel;
+        pCurTSS32->fs     = pVCpu->cpum.GstCtx.fs.Sel;
+        pCurTSS32->gs     = pVCpu->cpum.GstCtx.gs.Sel;
 
         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
…
         pCurTSS16->ip    = uNextEip;
         pCurTSS16->flags = u32EFlags;
-        pCurTSS16->ax    = pCtx->ax;
-        pCurTSS16->cx    = pCtx->cx;
-        pCurTSS16->dx    = pCtx->dx;
-        pCurTSS16->bx    = pCtx->bx;
-        pCurTSS16->sp    = pCtx->sp;
-        pCurTSS16->bp    = pCtx->bp;
-        pCurTSS16->si    = pCtx->si;
-        pCurTSS16->di    = pCtx->di;
-        pCurTSS16->es    = pCtx->es.Sel;
-        pCurTSS16->cs    = pCtx->cs.Sel;
-        pCurTSS16->ss    = pCtx->ss.Sel;
-        pCurTSS16->ds    = pCtx->ds.Sel;
+        pCurTSS16->ax    = pVCpu->cpum.GstCtx.ax;
+        pCurTSS16->cx    = pVCpu->cpum.GstCtx.cx;
+        pCurTSS16->dx    = pVCpu->cpum.GstCtx.dx;
+        pCurTSS16->bx    = pVCpu->cpum.GstCtx.bx;
+        pCurTSS16->sp    = pVCpu->cpum.GstCtx.sp;
+        pCurTSS16->bp    = pVCpu->cpum.GstCtx.bp;
+        pCurTSS16->si    = pVCpu->cpum.GstCtx.si;
+        pCurTSS16->di    = pVCpu->cpum.GstCtx.di;
+        pCurTSS16->es    = pVCpu->cpum.GstCtx.es.Sel;
+        pCurTSS16->cs    = pVCpu->cpum.GstCtx.cs.Sel;
+        pCurTSS16->ss    = pVCpu->cpum.GstCtx.ss.Sel;
+        pCurTSS16->ds    = pVCpu->cpum.GstCtx.ds.Sel;
 
         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
…
         /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
         PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
-        pNewTSS->selPrev  = pCtx->tr.Sel;
+        pNewTSS->selPrev  = pVCpu->cpum.GstCtx.tr.Sel;
     }
…
     {
         PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
-        uNewCr3       = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
+        uNewCr3       = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
         uNewEip       = pNewTSS32->eip;
         uNewEflags    = pNewTSS32->eflags;
…
     {
         rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
-                             pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
+                             pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
         if (rcStrict != VINF_SUCCESS)
         {
             Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
-                 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
+                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }
…
         {
             Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
-                 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
+                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }
…
      * until the completion of the task switch but before executing any instructions in the new task.
      */
-    pCtx->tr.Sel      = SelTSS;
-    pCtx->tr.ValidSel = SelTSS;
-    pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
-    pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
-    pCtx->tr.u64Base  = X86DESC_BASE(&pNewDescTSS->Legacy);
+    pVCpu->cpum.GstCtx.tr.Sel      = SelTSS;
+    pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
+    pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
+    pVCpu->cpum.GstCtx.tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
+    pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
+    pVCpu->cpum.GstCtx.tr.u64Base  = X86DESC_BASE(&pNewDescTSS->Legacy);
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
 
     /* Set the busy bit in TR. */
-    pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
+    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
     /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
     if (   enmTaskSwitch == IEMTASKSWITCH_CALL
…
     }
 
-    pCtx->dr[7] &= ~X86_DR7_LE_ALL;     /** @todo Should we clear DR7.LE bit too? */
-    pCtx->cr0  |= X86_CR0_TS;
+    pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL;    /** @todo Should we clear DR7.LE bit too? */
+    pVCpu->cpum.GstCtx.cr0  |= X86_CR0_TS;
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
 
-    pCtx->eip = uNewEip;
-    pCtx->eax = uNewEax;
-    pCtx->ecx = uNewEcx;
-    pCtx->edx = uNewEdx;
-    pCtx->ebx = uNewEbx;
-    pCtx->esp = uNewEsp;
-    pCtx->ebp = uNewEbp;
-    pCtx->esi = uNewEsi;
-    pCtx->edi = uNewEdi;
+    pVCpu->cpum.GstCtx.eip = uNewEip;
+    pVCpu->cpum.GstCtx.eax = uNewEax;
+    pVCpu->cpum.GstCtx.ecx = uNewEcx;
+    pVCpu->cpum.GstCtx.edx = uNewEdx;
+    pVCpu->cpum.GstCtx.ebx = uNewEbx;
+    pVCpu->cpum.GstCtx.esp = uNewEsp;
+    pVCpu->cpum.GstCtx.ebp = uNewEbp;
+    pVCpu->cpum.GstCtx.esi = uNewEsi;
+    pVCpu->cpum.GstCtx.edi = uNewEdi;
 
     uNewEflags &= X86_EFL_LIVE_MASK;
     uNewEflags |= X86_EFL_RA1_MASK;
-    IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
+    IEMMISC_SET_EFL(pVCpu, uNewEflags);
 
     /*
…
      * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
      */
-    pCtx->es.Sel     = uNewES;
-    pCtx->es.Attr.u &= ~X86DESCATTR_P;
-
-    pCtx->cs.Sel     = uNewCS;
-    pCtx->cs.Attr.u &= ~X86DESCATTR_P;
-
-    pCtx->ss.Sel     = uNewSS;
-    pCtx->ss.Attr.u &= ~X86DESCATTR_P;
-
-    pCtx->ds.Sel     = uNewDS;
-    pCtx->ds.Attr.u &= ~X86DESCATTR_P;
-
-    pCtx->fs.Sel     = uNewFS;
-    pCtx->fs.Attr.u &= ~X86DESCATTR_P;
-
-    pCtx->gs.Sel     = uNewGS;
-    pCtx->gs.Attr.u &= ~X86DESCATTR_P;
+    pVCpu->cpum.GstCtx.es.Sel     = uNewES;
+    pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
+
+    pVCpu->cpum.GstCtx.cs.Sel     = uNewCS;
+    pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
+
+    pVCpu->cpum.GstCtx.ss.Sel     = uNewSS;
+    pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
+
+    pVCpu->cpum.GstCtx.ds.Sel     = uNewDS;
+    pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
+
+    pVCpu->cpum.GstCtx.fs.Sel     = uNewFS;
+    pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
+
+    pVCpu->cpum.GstCtx.gs.Sel     = uNewGS;
+    pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
 
-    pCtx->ldtr.Sel     = uNewLdt;
-    pCtx->ldtr.fFlags  = CPUMSELREG_FLAGS_STALE;
-    pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
+    pVCpu->cpum.GstCtx.ldtr.Sel     = uNewLdt;
+    pVCpu->cpum.GstCtx.ldtr.fFlags  = CPUMSELREG_FLAGS_STALE;
+    pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
 
     if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
     {
-        pCtx->es.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->cs.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->ss.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->ds.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->fs.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->gs.Attr.u   |= X86DESCATTR_UNUSABLE;
-        pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.es.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.cs.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.ss.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.ds.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.fs.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.gs.Attr.u   |= X86DESCATTR_UNUSABLE;
+        pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
     }
…
     if (   fIsNewTSS386
-        && (pCtx->cr0 & X86_CR0_PG))
+        && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
     {
         /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
…
 
         /* Inform PGM. */
-        rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
+        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
         AssertRCReturn(rc, rc);
         /* ignore informational status codes */
…
     if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
-        iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
+        iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
     else
     {
-        Assert(!pCtx->ldtr.Attr.n.u1Present);   /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
+        Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);  /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
 
         IEMSELDESC DescNewLdt;
…
         {
             Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
-                 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
+                 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }
…
         }
 
-        pCtx->ldtr.ValidSel = uNewLdt;
-        pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
-        pCtx->ldtr.u64Base  = X86DESC_BASE(&DescNewLdt.Legacy);
-        pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
-        pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
+        pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
+        pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
+        pVCpu->cpum.GstCtx.ldtr.u64Base  = X86DESC_BASE(&DescNewLdt.Legacy);
+        pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
+        pVCpu->cpum.GstCtx.ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
         if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
-            pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
-        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
+            pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
+        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
     }
…
     {
         pVCpu->iem.s.uCpl = 3;
-        iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
-        iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
-        iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
-        iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
-        iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
-        iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
+        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
 
         /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
         DescSS.Legacy.u = 0;
-        DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
-        DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
-        DescSS.Legacy.Gen.u16BaseLow  = (uint16_t)pCtx->ss.u64Base;
-        DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
-        DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
+        DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
+        DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
+        DescSS.Legacy.Gen.u16BaseLow  = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
+        DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
+        DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
         DescSS.Legacy.Gen.u4Type      = X86_SEL_TYPE_RW_ACC;
         DescSS.Legacy.Gen.u2Dpl       = 3;
…
 
         /* Commit SS. */
-        pCtx->ss.Sel      = uNewSS;
-        pCtx->ss.ValidSel = uNewSS;
-        pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
-        pCtx->ss.u32Limit = cbLimit;
-        pCtx->ss.u64Base  = u64Base;
-        pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
-        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
+        pVCpu->cpum.GstCtx.ss.Sel      = uNewSS;
+        pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
+        pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
+        pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
+        pVCpu->cpum.GstCtx.ss.u64Base  = u64Base;
+        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
+        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
 
         /* CPL has changed, update IEM before loading rest of segments. */
…
         /*
          * Load the data segments for the new task.
          */
-        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
+        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
+        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
+        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
+        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
 
         /* Commit CS. */
-        pCtx->cs.Sel      = uNewCS;
-        pCtx->cs.ValidSel = uNewCS;
-        pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
-        pCtx->cs.u32Limit = cbLimit;
-        pCtx->cs.u64Base  = u64Base;
-        pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
+        pVCpu->cpum.GstCtx.cs.Sel      = uNewCS;
+        pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
+        pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
+        pVCpu->cpum.GstCtx.cs.u64Base  = u64Base;
+        pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
+        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
     }
…
     if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
     {
-        if (   pCtx->esp - 1 > cbLimitSS
-            || pCtx->esp < cbStackFrame)
+        if (   pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
+            || pVCpu->cpum.GstCtx.esp < cbStackFrame)
         {
             /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
             Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
-                 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
+                 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
             return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
         }
…
     else
     {
-        if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
-            || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
+        if (   pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
+            || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
         {
             Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
-                 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
+                 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
             return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
         }
…
 
     /* Check the new EIP against the new CS limit. */
-    if (pCtx->eip > pCtx->cs.u32Limit)
+    if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
     {
         Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
-             pCtx->eip, pCtx->cs.u32Limit));
+             pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
         /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
         return iemRaiseGeneralProtectionFault(pVCpu, uExt);
     }
 
-    Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
+    Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
 }
…
  * @returns VBox strict status code.
  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
- * @param   pCtx            The CPU context.
  * @param   cbInstr         The number of bytes to offset rIP by in the return
  *                          address.
…
 IEM_STATIC VBOXSTRICTRC
 iemRaiseXcptOrIntInProtMode(PVMCPU      pVCpu,
-                            PCPUMCTX    pCtx,
                             uint8_t     cbInstr,
                             uint8_t     u8Vector,
…
 {
-    IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
 
     /*
      * Read the IDT entry.
      */
-    if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
-    {
-        Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
+    if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
+    {
+        Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
     }
     X86DESC Idte;
     VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
-                                              pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
+                                              pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     {
…
 
         /* Do the actual task switch. */
-        return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
+        return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
     }
…
 
     /* Calc the flag image to push. */
-    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
+    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
     if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
         fEfl &= ~X86_EFL_RF;
…
         RTSEL    NewSS;
         uint32_t uNewEsp;
-        rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
+        rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
 
         IEMSELDESC DescSS;
-        rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
+        rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
         }
 
-        Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
+        Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
 
         /* Check that there is sufficient space for the stack frame. */
…
             if (fFlags & IEM_XCPT_FLAGS_ERR)
                 *uStackFrame.pu32++ = uErr;
-            uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
-            uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
+            uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
+            uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
             uStackFrame.pu32[2] = fEfl;
-            uStackFrame.pu32[3] = pCtx->esp;
-            uStackFrame.pu32[4] = pCtx->ss.Sel;
-            Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
+            uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
+            uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
+            Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
             if (fEfl & X86_EFL_VM)
             {
-                uStackFrame.pu32[1] = pCtx->cs.Sel;
-                uStackFrame.pu32[5] = pCtx->es.Sel;
-                uStackFrame.pu32[6] = pCtx->ds.Sel;
-                uStackFrame.pu32[7] = pCtx->fs.Sel;
-                uStackFrame.pu32[8] = pCtx->gs.Sel;
+                uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
+                uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
+                uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
+                uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
+                uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
             }
         }
…
             if (fFlags & IEM_XCPT_FLAGS_ERR)
                 *uStackFrame.pu16++ = uErr;
-            uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
-            uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
+            uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
+            uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
             uStackFrame.pu16[2] = fEfl;
-            uStackFrame.pu16[3] = pCtx->sp;
-            uStackFrame.pu16[4] = pCtx->ss.Sel;
-            Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
+            uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
+            uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
+            Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
             if (fEfl & X86_EFL_VM)
             {
-                uStackFrame.pu16[1] = pCtx->cs.Sel;
-                uStackFrame.pu16[5] = pCtx->es.Sel;
-                uStackFrame.pu16[6] = pCtx->ds.Sel;
-                uStackFrame.pu16[7] = pCtx->fs.Sel;
-                uStackFrame.pu16[8] = pCtx->gs.Sel;
+                uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
+                uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
+                uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
+                uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
+                uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
             }
         }
…
         /*
          * Start comitting the register changes (joins with the DPL=CPL branch).
          */
-        pCtx->ss.Sel      = NewSS;
-        pCtx->ss.ValidSel = NewSS;
-        pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
-        pCtx->ss.u32Limit = cbLimitSS;
-        pCtx->ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
-        pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
+        pVCpu->cpum.GstCtx.ss.Sel      = NewSS;
+        pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
+        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
+        pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
+        pVCpu->cpum.GstCtx.ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
+        pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
        /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
         *        16-bit handler, the high word of ESP remains unchanged (i.e. only
…
         *        - 16-bit TSS, 32-bit handler
         *        - 32-bit TSS, 16-bit handler */
-        if (!pCtx->ss.Attr.n.u1DefBig)
-            pCtx->sp  = (uint16_t)(uNewEsp - cbStackFrame);
+        if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+            pVCpu->cpum.GstCtx.sp  = (uint16_t)(uNewEsp - cbStackFrame);
         else
-            pCtx->rsp = uNewEsp - cbStackFrame;
+            pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
 
         if (fEfl & X86_EFL_VM)
         {
-            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
-            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
-            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
-            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
+            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
+            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
+            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
+            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
         }
     }
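For orientation, the ring-transition frame assembled above is, from the new stack pointer upwards: [error code,] EIP (biased by cbInstr for software interrupts), CS with RPL forced to the old CPL, EFLAGS, old ESP, old SS, plus ES/DS/FS/GS when interrupting V86 mode. A sketch of the 32-bit layout with a hypothetical struct name:

    #include <stdint.h>

    /* Sketch of the 32-bit inter-privilege exception frame pushed above. */
    typedef struct XCPTFRAME32
    {
        uint32_t eip;            /* uStackFrame.pu32[0] */
        uint32_t cs;             /* uStackFrame.pu32[1] */
        uint32_t eflags;         /* uStackFrame.pu32[2] */
        uint32_t esp;            /* uStackFrame.pu32[3] */
        uint32_t ss;             /* uStackFrame.pu32[4] */
        uint32_t es, ds, fs, gs; /* pu32[5..8], V86 mode only */
    } XCPTFRAME32;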
only … … 4974 4946 * - 16-bit TSS, 32-bit handler 4975 4947 * - 32-bit TSS, 16-bit handler */ 4976 if (!p Ctx->ss.Attr.n.u1DefBig)4977 p Ctx->sp = (uint16_t)(uNewEsp - cbStackFrame);4948 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 4949 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame); 4978 4950 else 4979 p Ctx->rsp = uNewEsp - cbStackFrame;4951 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame; 4980 4952 4981 4953 if (fEfl & X86_EFL_VM) 4982 4954 { 4983 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &p Ctx->gs);4984 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &p Ctx->fs);4985 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &p Ctx->es);4986 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &p Ctx->ds);4955 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs); 4956 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs); 4957 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es); 4958 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds); 4987 4959 } 4988 4960 } … … 5004 4976 if (fFlags & IEM_XCPT_FLAGS_ERR) 5005 4977 *uStackFrame.pu32++ = uErr; 5006 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? p Ctx->eip + cbInstr : pCtx->eip;5007 uStackFrame.pu32[1] = (p Ctx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;4978 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip; 4979 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl; 5008 4980 uStackFrame.pu32[2] = fEfl; 5009 4981 } … … 5012 4984 if (fFlags & IEM_XCPT_FLAGS_ERR) 5013 4985 *uStackFrame.pu16++ = uErr; 5014 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? p Ctx->eip + cbInstr : pCtx->eip;5015 uStackFrame.pu16[1] = (p Ctx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;4986 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip; 4987 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl; 5016 4988 uStackFrame.pu16[2] = fEfl; 5017 4989 } … … 5032 5004 * Start committing the register changes (joins with the other branch). 5033 5005 */ 5034 p Ctx->rsp = uNewRsp;5006 pVCpu->cpum.GstCtx.rsp = uNewRsp; 5035 5007 } 5036 5008 5037 5009 /* ... register committing continues. */ 5038 p Ctx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;5039 p Ctx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;5040 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;5041 p Ctx->cs.u32Limit = cbLimitCS;5042 p Ctx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);5043 p Ctx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);5044 5045 p Ctx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */5010 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl; 5011 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl; 5012 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 5013 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS; 5014 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 5015 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 5016 5017 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) 
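(Aside, illustration only — not part of the changeset: the commit above writes either the 16-bit SP or the full RSP depending on the stack segment's D/B bit, which is also why the @todo about mixed 16/32-bit TSS/handler combinations matters. A minimal sketch of that rule, hypothetical name:)

    /* Illustrative only: committing the post-switch stack pointer.  A 16-bit
       stack segment (D/B clear) must leave the high word of ESP unchanged,
       hence the uint16_t truncation. */
    static void sketchCommitStackPtr(PVMCPU pVCpu, uint32_t uNewEsp, uint8_t cbFrame)
    {
        if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
            pVCpu->cpum.GstCtx.sp  = (uint16_t)(uNewEsp - cbFrame); /* 16-bit stack */
        else
            pVCpu->cpum.GstCtx.rsp = uNewEsp - cbFrame;             /* 32-bit stack */
    }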
*/ 5046 5018 fEfl &= ~fEflToClear; 5047 IEMMISC_SET_EFL(pVCpu, pCtx,fEfl);5019 IEMMISC_SET_EFL(pVCpu, fEfl); 5048 5020 5049 5021 if (fFlags & IEM_XCPT_FLAGS_CR2) 5050 p Ctx->cr2 = uCr2;5022 pVCpu->cpum.GstCtx.cr2 = uCr2; 5051 5023 5052 5024 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 5053 iemRaiseXcptAdjustState(p Ctx, u8Vector);5025 iemRaiseXcptAdjustState(pVCpu, u8Vector); 5054 5026 5055 5027 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS; … … 5062 5034 * @returns VBox strict status code. 5063 5035 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5064 * @param pCtx The CPU context.5065 5036 * @param cbInstr The number of bytes to offset rIP by in the return 5066 5037 * address. … … 5072 5043 IEM_STATIC VBOXSTRICTRC 5073 5044 iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu, 5074 PCPUMCTX pCtx,5075 5045 uint8_t cbInstr, 5076 5046 uint8_t u8Vector, … … 5079 5049 uint64_t uCr2) 5080 5050 { 5081 IEM_CTX_ASSERT(p Ctx, IEM_CPUMCTX_EXTRN_XCPT_MASK);5051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 5082 5052 5083 5053 /* … … 5085 5055 */ 5086 5056 uint16_t offIdt = (uint16_t)u8Vector << 4; 5087 if (p Ctx->idtr.cbIdt < offIdt + 7)5088 { 5089 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, p Ctx->idtr.cbIdt));5057 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7) 5058 { 5059 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt)); 5090 5060 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)); 5091 5061 } 5092 5062 X86DESC64 Idte; 5093 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, p Ctx->idtr.pIdt + offIdt);5063 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt); 5094 5064 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 5095 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, p Ctx->idtr.pIdt + offIdt + 8);5065 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8); 5096 5066 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 5097 5067 { … … 5216 5186 || Idte.Gate.u3IST != 0) 5217 5187 { 5218 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx,uNewCpl, Idte.Gate.u3IST, &uNewRsp);5188 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp); 5219 5189 if (rcStrict != VINF_SUCCESS) 5220 5190 return rcStrict; 5221 5191 } 5222 5192 else 5223 uNewRsp = p Ctx->rsp;5193 uNewRsp = pVCpu->cpum.GstCtx.rsp; 5224 5194 uNewRsp &= ~(uint64_t)0xf; 5225 5195 … … 5227 5197 * Calc the flag image to push. 5228 5198 */ 5229 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu , pCtx);5199 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu); 5230 5200 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) 5231 5201 fEfl &= ~X86_EFL_RF; … … 5251 5221 if (fFlags & IEM_XCPT_FLAGS_ERR) 5252 5222 *uStackFrame.pu64++ = uErr; 5253 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? p Ctx->rip + cbInstr : pCtx->rip;5254 uStackFrame.pu64[1] = (p Ctx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */5223 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? 
pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip; 5224 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */ 5255 5225 uStackFrame.pu64[2] = fEfl; 5256 uStackFrame.pu64[3] = p Ctx->rsp;5257 uStackFrame.pu64[4] = p Ctx->ss.Sel;5226 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp; 5227 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel; 5258 5228 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); 5259 5229 if (rcStrict != VINF_SUCCESS) … … 5279 5249 if (uNewCpl != uOldCpl) 5280 5250 { 5281 p Ctx->ss.Sel = 0 | uNewCpl;5282 p Ctx->ss.ValidSel = 0 | uNewCpl;5283 p Ctx->ss.fFlags = CPUMSELREG_FLAGS_VALID;5284 p Ctx->ss.u32Limit = UINT32_MAX;5285 p Ctx->ss.u64Base = 0;5286 p Ctx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;5287 } 5288 p Ctx->rsp = uNewRsp - cbStackFrame;5289 p Ctx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;5290 p Ctx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;5291 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;5292 p Ctx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);5293 p Ctx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);5294 p Ctx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);5295 p Ctx->rip = uNewRip;5251 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl; 5252 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl; 5253 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 5254 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX; 5255 pVCpu->cpum.GstCtx.ss.u64Base = 0; 5256 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE; 5257 } 5258 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame; 5259 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl; 5260 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl; 5261 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 5262 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy); 5263 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 5264 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 5265 pVCpu->cpum.GstCtx.rip = uNewRip; 5296 5266 5297 5267 fEfl &= ~fEflToClear; 5298 IEMMISC_SET_EFL(pVCpu, pCtx,fEfl);5268 IEMMISC_SET_EFL(pVCpu, fEfl); 5299 5269 5300 5270 if (fFlags & IEM_XCPT_FLAGS_CR2) 5301 p Ctx->cr2 = uCr2;5271 pVCpu->cpum.GstCtx.cr2 = uCr2; 5302 5272 5303 5273 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 5304 iemRaiseXcptAdjustState(p Ctx, u8Vector);5274 iemRaiseXcptAdjustState(pVCpu, u8Vector); 5305 5275 5306 5276 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS; … … 5330 5300 uint64_t uCr2) 5331 5301 { 5332 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5333 5334 5302 /* 5335 5303 * Get all the state that we might need here. 5336 5304 */ 5337 5305 #ifdef IN_RING0 5338 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);5306 int rc = HMR0EnsureCompleteBasicContext(pVCpu, IEM_GET_CTX(pVCpu)); 5339 5307 AssertRCReturn(rc, rc); 5340 5308 #endif 5341 IEM_CTX_IMPORT_RET(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_XCPT_MASK);5342 IEM_CTX_ASSERT(p Ctx, IEM_CPUMCTX_EXTRN_XCPT_MASK);5309 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 5310 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 5343 5311 5344 5312 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */ … … 5352 5320 * Perform the V8086 IOPL check and upgrade the fault without nesting. 
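(Aside, illustration only — the 64-bit path above always pushes SS:RSP and aligns RSP down to 16 bytes first. A sketch of the frame it builds, lowest address first, using a hypothetical struct purely for documentation:)

    /* Illustrative layout of the long-mode exception/interrupt stack frame
       pushed above; uErrCd is only present when IEM_XCPT_FLAGS_ERR is set. */
    typedef struct SKETCHLMXCPTFRAME
    {
        uint64_t uErrCd;   /* optional error code */
        uint64_t uRip;     /* return RIP (past the INT n for software ints) */
        uint64_t uCs;      /* CS with the CPL merged into the RPL bits */
        uint64_t uRFlags;  /* flag image, RF cleared where applicable */
        uint64_t uRsp;     /* old stack pointer */
        uint64_t uSs;      /* old stack selector */
    } SKETCHLMXCPTFRAME;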
5353 5321 */ 5354 if ( p Ctx->eflags.Bits.u1VM5355 && p Ctx->eflags.Bits.u2IOPL != 35322 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM 5323 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3 5356 5324 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT 5357 && (p Ctx->cr0 & X86_CR0_PE) )5325 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) ) 5358 5326 { 5359 5327 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector)); … … 5365 5333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx", 5366 5334 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2, 5367 p Ctx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);5335 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp); 5368 5336 #endif 5369 5337 5370 5338 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5371 if (CPUMIsGuestInSvmNestedHwVirtMode( pCtx))5339 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 5372 5340 { 5373 5341 /* … … 5377 5345 * See AMD spec. 15.20 "Event Injection". 5378 5346 */ 5379 if (!p Ctx->hwvirt.svm.fInterceptEvents)5380 p Ctx->hwvirt.svm.fInterceptEvents = 1;5347 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents) 5348 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1; 5381 5349 else 5382 5350 { … … 5384 5352 * Check and handle if the event being raised is intercepted. 5385 5353 */ 5386 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx,u8Vector, fFlags, uErr, uCr2);5354 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2); 5387 5355 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE) 5388 5356 return rcStrict0; … … 5398 5366 if (pVCpu->iem.s.cXcptRecursions == 0) 5399 5367 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n", 5400 u8Vector, p Ctx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));5368 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2)); 5401 5369 else 5402 5370 { 5403 5371 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n", 5404 u8Vector, p Ctx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,5372 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, 5405 5373 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt)); 5406 5374 … … 5439 5407 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. 
*/ 5440 5408 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n")); 5441 if (!CPUMIsGuestInNestedHwVirtMode( pCtx))5409 if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 5442 5410 return VERR_EM_GUEST_CPU_HANG; 5443 5411 } … … 5521 5489 */ 5522 5490 VBOXSTRICTRC rcStrict; 5523 if (!(p Ctx->cr0 & X86_CR0_PE))5524 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx,cbInstr, u8Vector, fFlags, uErr, uCr2);5525 else if (p Ctx->msrEFER & MSR_K6_EFER_LMA)5526 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx,cbInstr, u8Vector, fFlags, uErr, uCr2);5491 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)) 5492 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2); 5493 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA) 5494 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2); 5527 5495 else 5528 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx,cbInstr, u8Vector, fFlags, uErr, uCr2);5496 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2); 5529 5497 5530 5498 /* Flush the prefetch buffer. */ … … 5542 5510 pVCpu->iem.s.fCurXcpt = fPrevXcpt; 5543 5511 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n", 5544 VBOXSTRICTRC_VAL(rcStrict), u8Vector, p Ctx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl,5512 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl, 5545 5513 pVCpu->iem.s.cXcptRecursions + 1)); 5546 5514 return rcStrict; … … 6068 6036 { 6069 6037 Assert(iSegReg < X86_SREG_COUNT); 6070 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6071 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6072 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg]; 6038 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6039 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg]; 6073 6040 6074 6041 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 … … 6115 6082 { 6116 6083 Assert(iSegReg < X86_SREG_COUNT); 6117 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6118 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6119 return &pCtx->aSRegs[iSegReg].Sel; 6084 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6085 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel; 6120 6086 } 6121 6087 … … 6131 6097 { 6132 6098 Assert(iSegReg < X86_SREG_COUNT); 6133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6134 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6135 return pCtx->aSRegs[iSegReg].Sel; 6099 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6100 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel; 6136 6101 } 6137 6102 … … 6147 6112 { 6148 6113 Assert(iSegReg < X86_SREG_COUNT); 6149 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6150 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6151 return pCtx->aSRegs[iSegReg].u64Base; 6114 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6115 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base; 6152 6116 } 6153 6117 … … 6163 6127 { 6164 6128 Assert(iReg < 16); 6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6166 return &pCtx->aGRegs[iReg]; 6129 return &pVCpu->cpum.GstCtx.aGRegs[iReg]; 6167 6130 } 6168 6131 … … 6179 6142 DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg) 6180 6143 { 6181 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6182 6144 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) 6183 6145 { 6184 6146 Assert(iReg < 16); 6185 return &p Ctx->aGRegs[iReg].u8;6147 return 
&pVCpu->cpum.GstCtx.aGRegs[iReg].u8; 6186 6148 } 6187 6149 /* high 8-bit register. */ 6188 6150 Assert(iReg < 8); 6189 return &p Ctx->aGRegs[iReg & 3].bHi;6151 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi; 6190 6152 } 6191 6153 … … 6201 6163 { 6202 6164 Assert(iReg < 16); 6203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6204 return &pCtx->aGRegs[iReg].u16; 6165 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16; 6205 6166 } 6206 6167 … … 6216 6177 { 6217 6178 Assert(iReg < 16); 6218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6219 return &pCtx->aGRegs[iReg].u32; 6179 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32; 6220 6180 } 6221 6181 … … 6231 6191 { 6232 6192 Assert(iReg < 64); 6233 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6234 return &pCtx->aGRegs[iReg].u64; 6193 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64; 6235 6194 } 6236 6195 … … 6246 6205 { 6247 6206 Assert(iSegReg < X86_SREG_COUNT); 6248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6249 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6250 return &pCtx->aSRegs[iSegReg].u64Base; 6207 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 6208 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base; 6251 6209 } 6252 6210 … … 6318 6276 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr) 6319 6277 { 6320 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6321 6278 switch (pVCpu->iem.s.enmEffOpSize) 6322 6279 { 6323 6280 case IEMMODE_16BIT: 6324 6281 { 6325 uint16_t uNewIp = p Ctx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6326 if ( uNewIp > p Ctx->cs.u32Limit6282 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6283 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit 6327 6284 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ 6328 6285 return iemRaiseGeneralProtectionFault0(pVCpu); 6329 p Ctx->rip = uNewIp;6286 pVCpu->cpum.GstCtx.rip = uNewIp; 6330 6287 break; 6331 6288 } … … 6333 6290 case IEMMODE_32BIT: 6334 6291 { 6335 Assert(p Ctx->rip <= UINT32_MAX);6292 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); 6336 6293 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 6337 6294 6338 uint32_t uNewEip = p Ctx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6339 if (uNewEip > p Ctx->cs.u32Limit)6295 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6296 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit) 6340 6297 return iemRaiseGeneralProtectionFault0(pVCpu); 6341 p Ctx->rip = uNewEip;6298 pVCpu->cpum.GstCtx.rip = uNewEip; 6342 6299 break; 6343 6300 } … … 6347 6304 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 6348 6305 6349 uint64_t uNewRip = p Ctx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6306 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6350 6307 if (!IEM_IS_CANONICAL(uNewRip)) 6351 6308 return iemRaiseGeneralProtectionFault0(pVCpu); 6352 p Ctx->rip = uNewRip;6309 pVCpu->cpum.GstCtx.rip = uNewRip; 6353 6310 break; 6354 6311 } … … 6357 6314 } 6358 6315 6359 p Ctx->eflags.Bits.u1RF = 0;6316 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0; 6360 6317 6361 6318 #ifndef IEM_WITH_CODE_TLB … … 6380 6337 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr) 6381 6338 { 6382 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6383 6339 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT); 6384 6340 6385 uint16_t uNewIp = p Ctx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6386 if ( uNewIp > p Ctx->cs.u32Limit6341 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6342 if ( uNewIp > 
pVCpu->cpum.GstCtx.cs.u32Limit 6387 6343 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ 6388 6344 return iemRaiseGeneralProtectionFault0(pVCpu); 6389 6345 /** @todo Test 16-bit jump in 64-bit mode. possible? */ 6390 p Ctx->rip = uNewIp;6391 p Ctx->eflags.Bits.u1RF = 0;6346 pVCpu->cpum.GstCtx.rip = uNewIp; 6347 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0; 6392 6348 6393 6349 #ifndef IEM_WITH_CODE_TLB … … 6412 6368 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr) 6413 6369 { 6414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6415 6370 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT); 6416 6371 6417 6372 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT) 6418 6373 { 6419 Assert(p Ctx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);6420 6421 uint32_t uNewEip = p Ctx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6422 if (uNewEip > p Ctx->cs.u32Limit)6374 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 6375 6376 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6377 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit) 6423 6378 return iemRaiseGeneralProtectionFault0(pVCpu); 6424 p Ctx->rip = uNewEip;6379 pVCpu->cpum.GstCtx.rip = uNewEip; 6425 6380 } 6426 6381 else … … 6428 6383 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 6429 6384 6430 uint64_t uNewRip = p Ctx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);6385 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu); 6431 6386 if (!IEM_IS_CANONICAL(uNewRip)) 6432 6387 return iemRaiseGeneralProtectionFault0(pVCpu); 6433 p Ctx->rip = uNewRip;6434 } 6435 p Ctx->eflags.Bits.u1RF = 0;6388 pVCpu->cpum.GstCtx.rip = uNewRip; 6389 } 6390 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0; 6436 6391 6437 6392 #ifndef IEM_WITH_CODE_TLB … … 6455 6410 IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip) 6456 6411 { 6457 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6458 6412 switch (pVCpu->iem.s.enmEffOpSize) 6459 6413 { … … 6461 6415 { 6462 6416 Assert(uNewRip <= UINT16_MAX); 6463 if ( uNewRip > p Ctx->cs.u32Limit6417 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit 6464 6418 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */ 6465 6419 return iemRaiseGeneralProtectionFault0(pVCpu); 6466 6420 /** @todo Test 16-bit jump in 64-bit mode. */ 6467 p Ctx->rip = uNewRip;6421 pVCpu->cpum.GstCtx.rip = uNewRip; 6468 6422 break; 6469 6423 } … … 6472 6426 { 6473 6427 Assert(uNewRip <= UINT32_MAX); 6474 Assert(p Ctx->rip <= UINT32_MAX);6428 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); 6475 6429 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 6476 6430 6477 if (uNewRip > p Ctx->cs.u32Limit)6431 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit) 6478 6432 return iemRaiseGeneralProtectionFault0(pVCpu); 6479 p Ctx->rip = uNewRip;6433 pVCpu->cpum.GstCtx.rip = uNewRip; 6480 6434 break; 6481 6435 } … … 6487 6441 if (!IEM_IS_CANONICAL(uNewRip)) 6488 6442 return iemRaiseGeneralProtectionFault0(pVCpu); 6489 p Ctx->rip = uNewRip;6443 pVCpu->cpum.GstCtx.rip = uNewRip; 6490 6444 break; 6491 6445 } … … 6494 6448 } 6495 6449 6496 p Ctx->eflags.Bits.u1RF = 0;6450 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0; 6497 6451 6498 6452 #ifndef IEM_WITH_CODE_TLB … … 6509 6463 * 6510 6464 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6511 * @param pCtx The CPU context which SP/ESP/RSP should be 6512 * read. 
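(Aside, illustration only — every jump helper above validates the target the same way: a CS limit check outside 64-bit mode, a canonical-address check in it. A minimal sketch, hypothetical name:)

    /* Illustrative only: the branch-target check shared by the
       iemRegRipRelativeJumpSxx/iemRegRipJump helpers above. */
    static bool sketchIsValidNewRip(PVMCPU pVCpu, uint64_t uNewRip)
    {
        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
            return IEM_IS_CANONICAL(uNewRip);  /* no limit check in long mode */
        return uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit;
    }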
6513 */ 6514 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx) 6465 */ 6466 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) 6515 6467 { 6516 6468 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6517 return p Ctx->rsp;6518 if (p Ctx->ss.Attr.n.u1DefBig)6519 return p Ctx->esp;6520 return p Ctx->sp;6469 return pVCpu->cpum.GstCtx.rsp; 6470 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6471 return pVCpu->cpum.GstCtx.esp; 6472 return pVCpu->cpum.GstCtx.sp; 6521 6473 } 6522 6474 … … 6532 6484 IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr) 6533 6485 { 6534 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6535 6486 switch (pVCpu->iem.s.enmCpuMode) 6536 6487 { 6537 6488 case IEMMODE_16BIT: 6538 Assert(p Ctx->rip <= UINT16_MAX);6539 p Ctx->eip += cbInstr;6540 p Ctx->eip &= UINT32_C(0xffff);6489 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX); 6490 pVCpu->cpum.GstCtx.eip += cbInstr; 6491 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff); 6541 6492 break; 6542 6493 6543 6494 case IEMMODE_32BIT: 6544 p Ctx->eip += cbInstr;6545 Assert(p Ctx->rip <= UINT32_MAX);6495 pVCpu->cpum.GstCtx.eip += cbInstr; 6496 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); 6546 6497 break; 6547 6498 6548 6499 case IEMMODE_64BIT: 6549 p Ctx->rip += cbInstr;6500 pVCpu->cpum.GstCtx.rip += cbInstr; 6550 6501 break; 6551 6502 default: AssertFailed(); … … 6576 6527 IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr) 6577 6528 { 6578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6579 6580 pCtx->eflags.Bits.u1RF = 0; 6529 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0; 6581 6530 6582 6531 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2); 6583 6532 #if ARCH_BITS >= 64 6584 6533 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX }; 6585 Assert(p Ctx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);6586 p Ctx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];6534 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]); 6535 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]; 6587 6536 #else 6588 6537 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6589 p Ctx->rip += cbInstr;6538 pVCpu->cpum.GstCtx.rip += cbInstr; 6590 6539 else 6591 p Ctx->eip += cbInstr;6540 pVCpu->cpum.GstCtx.eip += cbInstr; 6592 6541 #endif 6593 6542 } … … 6609 6558 * 6610 6559 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6611 * @param pCtx The CPU context which SP/ESP/RSP should be6612 * updated.6613 6560 * @param cbToAdd The number of bytes to add (8-bit!). 6614 6561 */ 6615 DECLINLINE(void) iemRegAddToRsp(P CVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)6562 DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd) 6616 6563 { 6617 6564 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6618 p Ctx->rsp += cbToAdd;6619 else if (p Ctx->ss.Attr.n.u1DefBig)6620 p Ctx->esp += cbToAdd;6565 pVCpu->cpum.GstCtx.rsp += cbToAdd; 6566 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6567 pVCpu->cpum.GstCtx.esp += cbToAdd; 6621 6568 else 6622 p Ctx->sp += cbToAdd;6569 pVCpu->cpum.GstCtx.sp += cbToAdd; 6623 6570 } 6624 6571 … … 6628 6575 * 6629 6576 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6630 * @param pCtx The CPU context which SP/ESP/RSP should be6631 * updated.6632 6577 * @param cbToSub The number of bytes to subtract (8-bit!). 
6633 6578 */ 6634 DECLINLINE(void) iemRegSubFromRsp(P CVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)6579 DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub) 6635 6580 { 6636 6581 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6637 p Ctx->rsp -= cbToSub;6638 else if (p Ctx->ss.Attr.n.u1DefBig)6639 p Ctx->esp -= cbToSub;6582 pVCpu->cpum.GstCtx.rsp -= cbToSub; 6583 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6584 pVCpu->cpum.GstCtx.esp -= cbToSub; 6640 6585 else 6641 p Ctx->sp -= cbToSub;6586 pVCpu->cpum.GstCtx.sp -= cbToSub; 6642 6587 } 6643 6588 … … 6649 6594 * @param pTmpRsp The temporary SP/ESP/RSP to update. 6650 6595 * @param cbToAdd The number of bytes to add (16-bit). 6651 * @param pCtx Where to get the current stack mode. 6652 */ 6653 DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd) 6596 */ 6597 DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) 6654 6598 { 6655 6599 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6656 6600 pTmpRsp->u += cbToAdd; 6657 else if (p Ctx->ss.Attr.n.u1DefBig)6601 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6658 6602 pTmpRsp->DWords.dw0 += cbToAdd; 6659 6603 else … … 6668 6612 * @param pTmpRsp The temporary SP/ESP/RSP to update. 6669 6613 * @param cbToSub The number of bytes to subtract. 6670 * @param pCtx Where to get the current stack mode.6671 6614 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is 6672 6615 * expecting that. 6673 6616 */ 6674 DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, P CCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)6617 DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) 6675 6618 { 6676 6619 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6677 6620 pTmpRsp->u -= cbToSub; 6678 else if (p Ctx->ss.Attr.n.u1DefBig)6621 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6679 6622 pTmpRsp->DWords.dw0 -= cbToSub; 6680 6623 else … … 6689 6632 * @returns Effective stack addressf for the push. 6690 6633 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6691 * @param pCtx Where to get the current stack mode.6692 6634 * @param cbItem The size of the stack item to pop. 6693 6635 * @param puNewRsp Where to return the new RSP value. 6694 6636 */ 6695 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t cbItem, uint64_t *puNewRsp)6637 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) 6696 6638 { 6697 6639 RTUINT64U uTmpRsp; 6698 6640 RTGCPTR GCPtrTop; 6699 uTmpRsp.u = p Ctx->rsp;6641 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp; 6700 6642 6701 6643 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6702 6644 GCPtrTop = uTmpRsp.u -= cbItem; 6703 else if (p Ctx->ss.Attr.n.u1DefBig)6645 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6704 6646 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem; 6705 6647 else … … 6716 6658 * @returns Current stack pointer. 6717 6659 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6718 * @param pCtx Where to get the current stack mode.6719 6660 * @param cbItem The size of the stack item to pop. 6720 6661 * @param puNewRsp Where to return the new RSP value. 
6721 6662 */ 6722 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx,uint8_t cbItem, uint64_t *puNewRsp)6663 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) 6723 6664 { 6724 6665 RTUINT64U uTmpRsp; 6725 6666 RTGCPTR GCPtrTop; 6726 uTmpRsp.u = p Ctx->rsp;6667 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp; 6727 6668 6728 6669 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) … … 6731 6672 uTmpRsp.u += cbItem; 6732 6673 } 6733 else if (p Ctx->ss.Attr.n.u1DefBig)6674 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6734 6675 { 6735 6676 GCPtrTop = uTmpRsp.DWords.dw0; … … 6752 6693 * @returns Effective stack addressf for the push. 6753 6694 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6754 * @param pCtx Where to get the current stack mode.6755 6695 * @param pTmpRsp The temporary stack pointer. This is updated. 6756 6696 * @param cbItem The size of the stack item to pop. 6757 6697 */ 6758 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, P CCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)6698 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) 6759 6699 { 6760 6700 RTGCPTR GCPtrTop; … … 6762 6702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 6763 6703 GCPtrTop = pTmpRsp->u -= cbItem; 6764 else if (p Ctx->ss.Attr.n.u1DefBig)6704 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6765 6705 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem; 6766 6706 else … … 6776 6716 * @returns Current stack pointer. 6777 6717 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6778 * @param pCtx Where to get the current stack mode.6779 6718 * @param pTmpRsp The temporary stack pointer. This is updated. 6780 6719 * @param cbItem The size of the stack item to pop. 
6781 6720 */ 6782 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, P CCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)6721 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) 6783 6722 { 6784 6723 RTGCPTR GCPtrTop; … … 6788 6727 pTmpRsp->u += cbItem; 6789 6728 } 6790 else if (p Ctx->ss.Attr.n.u1DefBig)6729 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 6791 6730 { 6792 6731 GCPtrTop = pTmpRsp->DWords.dw0; … … 6824 6763 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu); 6825 6764 #endif 6826 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6827 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6765 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6828 6766 } 6829 6767 … … 6869 6807 CPUMRZFpuStateActualizeForRead(pVCpu); 6870 6808 #endif 6871 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6872 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6809 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6873 6810 } 6874 6811 … … 6888 6825 CPUMRZFpuStateActualizeForChange(pVCpu); 6889 6826 #endif 6890 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6891 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6827 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6892 6828 } 6893 6829 … … 6908 6844 CPUMRZFpuStateActualizeSseForRead(pVCpu); 6909 6845 #endif 6910 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6911 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6846 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6912 6847 } 6913 6848 … … 6928 6863 CPUMRZFpuStateActualizeForChange(pVCpu); 6929 6864 #endif 6930 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6931 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6865 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6932 6866 } 6933 6867 … … 6948 6882 CPUMRZFpuStateActualizeAvxForRead(pVCpu); 6949 6883 #endif 6950 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6951 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6884 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6952 6885 } 6953 6886 … … 6968 6901 CPUMRZFpuStateActualizeForChange(pVCpu); 6969 6902 #endif 6970 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6971 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6903 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 6972 6904 } 6973 6905 … … 6990 6922 * 6991 6923 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6992 * @param pCtx The CPU context.6993 6924 * @param pFpuCtx The FPU context. 
6994 6925 */ 6995 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, P CPUMCTX pCtx, PX86FXSTATE pFpuCtx)6926 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) 6996 6927 { 6997 6928 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX); … … 7003 6934 * happens in real mode here based on the fnsave and fnstenv images. */ 7004 6935 pFpuCtx->CS = 0; 7005 pFpuCtx->FPUIP = p Ctx->eip | ((uint32_t)pCtx->cs.Sel << 4);6936 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4); 7006 6937 } 7007 6938 else 7008 6939 { 7009 pFpuCtx->CS = p Ctx->cs.Sel;7010 pFpuCtx->FPUIP = p Ctx->rip;6940 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel; 6941 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip; 7011 6942 } 7012 6943 } … … 7017 6948 * 7018 6949 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7019 * @param pCtx The CPU context.7020 6950 * @param pFpuCtx The FPU context. 7021 6951 * @param iEffSeg The effective segment register. 7022 6952 * @param GCPtrEff The effective address relative to @a iEffSeg. 7023 6953 */ 7024 DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, P CPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)6954 DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7025 6955 { 7026 6956 RTSEL sel; 7027 6957 switch (iEffSeg) 7028 6958 { 7029 case X86_SREG_DS: sel = p Ctx->ds.Sel; break;7030 case X86_SREG_SS: sel = p Ctx->ss.Sel; break;7031 case X86_SREG_CS: sel = p Ctx->cs.Sel; break;7032 case X86_SREG_ES: sel = p Ctx->es.Sel; break;7033 case X86_SREG_FS: sel = p Ctx->fs.Sel; break;7034 case X86_SREG_GS: sel = p Ctx->gs.Sel; break;6959 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break; 6960 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break; 6961 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break; 6962 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break; 6963 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break; 6964 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break; 7035 6965 default: 7036 6966 AssertMsgFailed(("%d\n", iEffSeg)); 7037 sel = p Ctx->ds.Sel;6967 sel = pVCpu->cpum.GstCtx.ds.Sel; 7038 6968 } 7039 6969 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. 
*/ … … 7209 7139 IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult) 7210 7140 { 7211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7212 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7213 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7141 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7142 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7214 7143 iemFpuMaybePushResult(pResult, pFpuCtx); 7215 7144 } … … 7227 7156 IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7228 7157 { 7229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7231 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7158 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7159 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7160 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7233 7161 iemFpuMaybePushResult(pResult, pFpuCtx); 7234 7162 } … … 7244 7172 IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult) 7245 7173 { 7246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7247 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7248 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7174 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7249 7176 7250 7177 /* Update FSW and bail if there are pending exceptions afterwards. */ … … 7299 7226 IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) 7300 7227 { 7301 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7302 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7228 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7229 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7304 7230 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 7305 7231 } … … 7316 7242 IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) 7317 7243 { 7318 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7319 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7320 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7244 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7321 7246 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 7322 7247 iemFpuMaybePopOne(pFpuCtx); … … 7337 7262 uint8_t iEffSeg, RTGCPTR GCPtrEff) 7338 7263 { 7339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7340 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7341 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7342 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7264 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7265 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7266 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7343 7267 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 7344 7268 } … … 7358 7282 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7359 7283 { 7360 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7361 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7362 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7363 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7284 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7285 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7286 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7364 7287 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg); 7365 7288 
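    /* Aside, illustration only (not part of the changeset): the pop below
       just rotates TOP and frees the old ST(0) slot.  Roughly, assuming the
       X86_FSW_TOP_* definitions from iprt/x86.h:
           uint16_t uTop = (pFpuCtx->FSW & X86_FSW_TOP_MASK) >> X86_FSW_TOP_SHIFT;
           uTop = (uTop + 1) & 7;                             // TOP is modulo 8
           pFpuCtx->FSW  = (pFpuCtx->FSW & ~X86_FSW_TOP_MASK)
                         | (uTop << X86_FSW_TOP_SHIFT);       // commit new TOP
       (the real worker also clears the FTW bit of the vacated register). */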
iemFpuMaybePopOne(pFpuCtx); … … 7374 7297 IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu) 7375 7298 { 7376 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7377 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7378 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7299 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7300 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7379 7301 } 7380 7302 … … 7437 7359 IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW) 7438 7360 { 7439 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7440 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7441 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7362 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7442 7363 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 7443 7364 } … … 7452 7373 IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW) 7453 7374 { 7454 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7455 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7456 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7457 7377 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 7458 7378 iemFpuMaybePopOne(pFpuCtx); … … 7470 7390 IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7471 7391 { 7472 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7473 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7474 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7393 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7394 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7476 7395 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 7477 7396 } … … 7486 7405 IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW) 7487 7406 { 7488 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7489 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7491 7409 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 7492 7410 iemFpuMaybePopOne(pFpuCtx); … … 7505 7423 IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7506 7424 { 7507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7509 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7510 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7425 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7426 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7427 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7511 7428 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW); 7512 7429 iemFpuMaybePopOne(pFpuCtx); … … 7553 7470 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg) 7554 7471 { 7555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7556 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7557 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7472 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7473 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7558 7474 iemFpuStackUnderflowOnly(pFpuCtx, iStReg); 7559 7475 } … … 7563 7479 iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7564 7480 
{ 7565 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7566 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7567 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7568 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7481 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7482 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7483 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7569 7484 iemFpuStackUnderflowOnly(pFpuCtx, iStReg); 7570 7485 } … … 7573 7488 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg) 7574 7489 { 7575 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7576 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7490 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7491 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7578 7492 iemFpuStackUnderflowOnly(pFpuCtx, iStReg); 7579 7493 iemFpuMaybePopOne(pFpuCtx); … … 7584 7498 iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7585 7499 { 7586 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7587 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7588 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7589 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7500 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7501 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7502 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7590 7503 iemFpuStackUnderflowOnly(pFpuCtx, iStReg); 7591 7504 iemFpuMaybePopOne(pFpuCtx); … … 7595 7508 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu) 7596 7509 { 7597 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7598 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7599 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7510 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7511 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7600 7512 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX); 7601 7513 iemFpuMaybePopOne(pFpuCtx); … … 7607 7519 iemFpuStackPushUnderflow(PVMCPU pVCpu) 7608 7520 { 7609 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7610 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7611 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7521 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7522 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7612 7523 7613 7524 if (pFpuCtx->FCW & X86_FCW_IM) … … 7634 7545 iemFpuStackPushUnderflowTwo(PVMCPU pVCpu) 7635 7546 { 7636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7637 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7638 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7548 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7639 7549 7640 7550 if (pFpuCtx->FCW & X86_FCW_IM) … … 7693 7603 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu) 7694 7604 { 7695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7696 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7697 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7605 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7698 7607 iemFpuStackPushOverflowOnly(pFpuCtx); 7699 7608 } … … 7710 7619 iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) 7711 7620 { 7712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7713 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 7714 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff); 7715 
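    /* Aside (not part of the changeset): after this refactoring both
       workers take only pVCpu and pFpuCtx -- iemFpuUpdateDP looks the
       segment selector up in pVCpu->cpum.GstCtx itself -- so callers no
       longer have to thread a PCPUMCTX through every FPU state update. */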
iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx); 7621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7622 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff); 7623 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 7716 7624 iemFpuStackPushOverflowOnly(pFpuCtx); 7717 7625 } … … 7890 7798 iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) 7891 7799 { 7892 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));7800 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 7893 7801 7894 7802 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) … … 7930 7838 iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr) 7931 7839 { 7932 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));7840 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 7933 7841 7934 7842 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) … … 7974 7882 return VINF_SUCCESS; 7975 7883 7976 IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));7884 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 7977 7885 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 7978 7886 switch (pVCpu->iem.s.enmCpuMode) … … 9135 9043 if (iSegReg >= X86_SREG_FS) 9136 9044 { 9137 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));9045 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 9138 9046 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 9139 9047 GCPtrMem += pSel->u64Base; … … 9148 9056 else 9149 9057 { 9150 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));9058 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 9151 9059 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 9152 9060 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN)) … … 9191 9099 if (iSegReg >= X86_SREG_FS) 9192 9100 { 9193 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));9101 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 9194 9102 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 9195 9103 GCPtrMem += pSel->u64Base; … … 9204 9112 else 9205 9113 { 9206 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));9114 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 9207 9115 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg); 9208 9116 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE … … 10199 10107 /* Increment the stack pointer. */ 10200 10108 uint64_t uNewRsp; 10201 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10202 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp); 10109 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp); 10203 10110 10204 10111 /* Write the word the lazy way. */ … … 10213 10120 /* Commit the new RSP value unless we an access handler made trouble. */ 10214 10121 if (rc == VINF_SUCCESS) 10215 p Ctx->rsp = uNewRsp;10122 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10216 10123 10217 10124 return rc; … … 10230 10137 /* Increment the stack pointer. */ 10231 10138 uint64_t uNewRsp; 10232 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10233 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp); 10139 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp); 10234 10140 10235 10141 /* Write the dword the lazy way. 
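(Aside, illustration only — each push helper above follows the same compute/store/commit shape, and RSP is committed only once the store succeeded, so a faulting push leaves the guest stack pointer untouched. A minimal sketch, hypothetical name:)

    /* Illustrative only: the common shape of the stack push helpers. */
    static VBOXSTRICTRC sketchStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
    {
        uint64_t uNewRsp;
        RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);  /* compute */
        VBOXSTRICTRC rc = iemMemStoreDataU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value); /* store */
        if (rc == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;                         /* commit */
        return rc;
    }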
*/ … … 10244 10150 /* Commit the new RSP value unless we an access handler made trouble. */ 10245 10151 if (rc == VINF_SUCCESS) 10246 p Ctx->rsp = uNewRsp;10152 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10247 10153 10248 10154 return rc; … … 10261 10167 /* Increment the stack pointer. */ 10262 10168 uint64_t uNewRsp; 10263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp); 10169 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp); 10265 10170 10266 10171 /* The intel docs talks about zero extending the selector register … … 10284 10189 /* Commit the new RSP value unless we an access handler made trouble. */ 10285 10190 if (rc == VINF_SUCCESS) 10286 p Ctx->rsp = uNewRsp;10191 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10287 10192 10288 10193 return rc; … … 10301 10206 /* Increment the stack pointer. */ 10302 10207 uint64_t uNewRsp; 10303 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10304 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp); 10208 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp); 10305 10209 10306 10210 /* Write the word the lazy way. */ … … 10315 10219 /* Commit the new RSP value unless we an access handler made trouble. */ 10316 10220 if (rc == VINF_SUCCESS) 10317 p Ctx->rsp = uNewRsp;10221 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10318 10222 10319 10223 return rc; … … 10332 10236 /* Increment the stack pointer. */ 10333 10237 uint64_t uNewRsp; 10334 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10335 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp); 10238 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp); 10336 10239 10337 10240 /* Write the word the lazy way. */ … … 10345 10248 /* Commit the new RSP value. */ 10346 10249 if (rc == VINF_SUCCESS) 10347 p Ctx->rsp = uNewRsp;10250 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10348 10251 } 10349 10252 … … 10363 10266 /* Increment the stack pointer. */ 10364 10267 uint64_t uNewRsp; 10365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10366 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp); 10268 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp); 10367 10269 10368 10270 /* Write the word the lazy way. */ … … 10376 10278 /* Commit the new RSP value. */ 10377 10279 if (rc == VINF_SUCCESS) 10378 p Ctx->rsp = uNewRsp;10280 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10379 10281 } 10380 10282 … … 10394 10296 /* Increment the stack pointer. */ 10395 10297 uint64_t uNewRsp; 10396 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10397 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp); 10298 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp); 10398 10299 10399 10300 /* Write the word the lazy way. */ … … 10407 10308 /* Commit the new RSP value. */ 10408 10309 if (rc == VINF_SUCCESS) 10409 p Ctx->rsp = uNewRsp;10310 pVCpu->cpum.GstCtx.rsp = uNewRsp; 10410 10311 } 10411 10312 … … 10425 10326 { 10426 10327 /* Increment the stack pointer. */ 10427 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10428 10328 RTUINT64U NewRsp = *pTmpRsp; 10429 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx,&NewRsp, 2);10329 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2); 10430 10330 10431 10331 /* Write the word the lazy way. */ … … 10457 10357 { 10458 10358 /* Increment the stack pointer. */ 10459 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10460 10359 RTUINT64U NewRsp = *pTmpRsp; 10461 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx,&NewRsp, 4);10360 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4); 10462 10361 10463 10362 /* Write the word the lazy way. 
*/ … … 10489 10388 { 10490 10389 /* Increment the stack pointer. */ 10491 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10492 10390 RTUINT64U NewRsp = *pTmpRsp; 10493 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx,&NewRsp, 8);10391 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8); 10494 10392 10495 10393 /* Write the word the lazy way. */ … … 10521 10419 { 10522 10420 /* Increment the stack pointer. */ 10523 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10524 10421 RTUINT64U NewRsp = *pTmpRsp; 10525 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx,&NewRsp, 2);10422 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2); 10526 10423 10527 10424 /* Write the word the lazy way. */ … … 10553 10450 { 10554 10451 /* Increment the stack pointer. */ 10555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10556 10452 RTUINT64U NewRsp = *pTmpRsp; 10557 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx,&NewRsp, 4);10453 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4); 10558 10454 10559 10455 /* Write the word the lazy way. */ … … 10585 10481 { 10586 10482 /* Increment the stack pointer. */ 10587 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10588 10483 RTUINT64U NewRsp = *pTmpRsp; 10589 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx,&NewRsp, 8);10484 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8); 10590 10485 10591 10486 /* Write the word the lazy way. */ … … 10626 10521 { 10627 10522 Assert(cbMem < UINT8_MAX); 10628 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10629 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp); 10523 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp); 10630 10524 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 10631 10525 } … … 10670 10564 { 10671 10565 Assert(cbMem < UINT8_MAX); 10672 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10673 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp); 10566 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp); 10674 10567 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 10675 10568 } … … 10693 10586 { 10694 10587 Assert(cbMem < UINT8_MAX); 10695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10696 10588 RTUINT64U NewRsp; 10697 10589 NewRsp.u = *puNewRsp; 10698 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx,&NewRsp, 8);10590 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8); 10699 10591 *puNewRsp = NewRsp.u; 10700 10592 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); … … 10830 10722 { 10831 10723 AssertPtr(pDesc); 10832 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 10833 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR); 10724 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR); 10834 10725 10835 10726 /** @todo did the 286 require all 8 bytes to be accessible? 
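(Aside, illustration only — the fetch below resolves a selector to its descriptor address as table base plus the selector's index bits, picking the LDT or GDT from the TI bit. A minimal sketch, hypothetical name, with the bounds and present checks omitted:)

    /* Illustrative only: selector -> guest-context descriptor address. */
    static RTGCPTR sketchSelToDescAddr(PVMCPU pVCpu, uint16_t uSel)
    {
        RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT)
                          ? pVCpu->cpum.GstCtx.ldtr.u64Base
                          : pVCpu->cpum.GstCtx.gdtr.pGdt;
        return GCPtrBase + (uSel & X86_SEL_MASK);  /* strip RPL and TI bits */
    }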
*/ … … 10840 10731 if (uSel & X86_SEL_LDT) 10841 10732 { 10842 if ( !p Ctx->ldtr.Attr.n.u1Present10843 || (uSel | X86_SEL_RPL_LDT) > p Ctx->ldtr.u32Limit )10733 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present 10734 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit ) 10844 10735 { 10845 10736 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n", 10846 uSel, p Ctx->ldtr.u32Limit, pCtx->ldtr.Sel));10737 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel)); 10847 10738 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 10848 10739 uErrorCode, 0); 10849 10740 } 10850 10741 10851 Assert(p Ctx->ldtr.Attr.n.u1Present);10852 GCPtrBase = p Ctx->ldtr.u64Base;10742 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); 10743 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base; 10853 10744 } 10854 10745 else 10855 10746 { 10856 if ((uSel | X86_SEL_RPL_LDT) > p Ctx->gdtr.cbGdt)10747 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt) 10857 10748 { 10858 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, p Ctx->gdtr.cbGdt));10749 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt)); 10859 10750 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 10860 10751 uErrorCode, 0); 10861 10752 } 10862 GCPtrBase = p Ctx->gdtr.pGdt;10753 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt; 10863 10754 } 10864 10755 … … 10888 10779 || pDesc->Legacy.Gen.u1DescType) 10889 10780 pDesc->Long.au64[1] = 0; 10890 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? p Ctx->ldtr.u32Limit : pCtx->gdtr.cbGdt))10781 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt)) 10891 10782 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1); 10892 10783 else … … 10946 10837 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel) 10947 10838 { 10948 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);10949 10950 10839 /* 10951 10840 * Get the selector table base and calculate the entry address. 10952 10841 */ 10953 10842 RTGCPTR GCPtr = uSel & X86_SEL_LDT 10954 ? p Ctx->ldtr.u64Base10955 : p Ctx->gdtr.pGdt;10843 ? 
pVCpu->cpum.GstCtx.ldtr.u64Base 10844 : pVCpu->cpum.GstCtx.gdtr.pGdt; 10956 10845 GCPtr += uSel & X86_SEL_MASK; 10957 10846 … … 11173 11062 #define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64 11174 11063 #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \ 11175 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11064 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11176 11065 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \ 11177 11066 } while (0) 11178 11067 #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \ 11179 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11068 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11180 11069 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \ 11181 11070 } while (0) 11182 11071 #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \ 11183 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11072 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11184 11073 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \ 11185 11074 } while (0) 11186 11075 /** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */ 11187 11076 #define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \ 11188 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11077 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11189 11078 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \ 11190 11079 } while (0) 11191 11080 #define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \ 11192 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11081 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11193 11082 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \ 11194 11083 } while (0) … … 11198 11087 /** @todo IEM_MC_FETCH_LDTR_U16, IEM_MC_FETCH_LDTR_U32, IEM_MC_FETCH_LDTR_U64, IEM_MC_FETCH_TR_U16, IEM_MC_FETCH_TR_U32, and IEM_MC_FETCH_TR_U64 aren't worth it... 
*/ 11199 11088 #define IEM_MC_FETCH_LDTR_U16(a_u16Dst) do { \ 11200 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_LDTR); \11089 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_LDTR); \ 11201 11090 (a_u16Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \ 11202 11091 } while (0) 11203 11092 #define IEM_MC_FETCH_LDTR_U32(a_u32Dst) do { \ 11204 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_LDTR); \11093 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_LDTR); \ 11205 11094 (a_u32Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \ 11206 11095 } while (0) 11207 11096 #define IEM_MC_FETCH_LDTR_U64(a_u64Dst) do { \ 11208 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_LDTR); \11097 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_LDTR); \ 11209 11098 (a_u64Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \ 11210 11099 } while (0) 11211 11100 #define IEM_MC_FETCH_TR_U16(a_u16Dst) do { \ 11212 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_TR); \11101 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_TR); \ 11213 11102 (a_u16Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \ 11214 11103 } while (0) 11215 11104 #define IEM_MC_FETCH_TR_U32(a_u32Dst) do { \ 11216 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_TR); \11105 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_TR); \ 11217 11106 (a_u32Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \ 11218 11107 } while (0) 11219 11108 #define IEM_MC_FETCH_TR_U64(a_u64Dst) do { \ 11220 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_TR); \11109 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_TR); \ 11221 11110 (a_u64Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \ 11222 11111 } while (0) … … 11239 11128 /** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */ 11240 11129 #define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \ 11241 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11130 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11242 11131 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \ 11243 11132 } while (0) 11244 11133 #define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \ 11245 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \11134 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \ 11246 11135 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. 
*/ \ 11247 11136 } while (0) … … 12792 12681 { 12793 12682 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm)); 12794 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);12795 12683 # define SET_SS_DEF() \ 12796 12684 do \ … … 12824 12712 switch (bRm & X86_MODRM_RM_MASK) 12825 12713 { 12826 case 0: u16EffAddr += p Ctx->bx + pCtx->si; break;12827 case 1: u16EffAddr += p Ctx->bx + pCtx->di; break;12828 case 2: u16EffAddr += p Ctx->bp + pCtx->si; SET_SS_DEF(); break;12829 case 3: u16EffAddr += p Ctx->bp + pCtx->di; SET_SS_DEF(); break;12830 case 4: u16EffAddr += p Ctx->si; break;12831 case 5: u16EffAddr += p Ctx->di; break;12832 case 6: u16EffAddr += p Ctx->bp; SET_SS_DEF(); break;12833 case 7: u16EffAddr += p Ctx->bx; break;12714 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break; 12715 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break; 12716 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break; 12717 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break; 12718 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break; 12719 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break; 12720 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break; 12721 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break; 12834 12722 } 12835 12723 } … … 12850 12738 switch ((bRm & X86_MODRM_RM_MASK)) 12851 12739 { 12852 case 0: u32EffAddr = p Ctx->eax; break;12853 case 1: u32EffAddr = p Ctx->ecx; break;12854 case 2: u32EffAddr = p Ctx->edx; break;12855 case 3: u32EffAddr = p Ctx->ebx; break;12740 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 12741 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 12742 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 12743 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 12856 12744 case 4: /* SIB */ 12857 12745 { … … 12861 12749 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) 12862 12750 { 12863 case 0: u32EffAddr = p Ctx->eax; break;12864 case 1: u32EffAddr = p Ctx->ecx; break;12865 case 2: u32EffAddr = p Ctx->edx; break;12866 case 3: u32EffAddr = p Ctx->ebx; break;12751 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 12752 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 12753 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 12754 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 12867 12755 case 4: u32EffAddr = 0; /*none */ break; 12868 case 5: u32EffAddr = p Ctx->ebp; break;12869 case 6: u32EffAddr = p Ctx->esi; break;12870 case 7: u32EffAddr = p Ctx->edi; break;12756 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break; 12757 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 12758 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 12871 12759 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 12872 12760 } … … 12876 12764 switch (bSib & X86_SIB_BASE_MASK) 12877 12765 { 12878 case 0: u32EffAddr += p Ctx->eax; break;12879 case 1: u32EffAddr += p Ctx->ecx; break;12880 case 2: u32EffAddr += p Ctx->edx; break;12881 case 3: u32EffAddr += p Ctx->ebx; break;12882 case 4: u32EffAddr += p Ctx->esp; SET_SS_DEF(); break;12766 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break; 12767 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break; 12768 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break; 12769 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break; 12770 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break; 12883 12771 case 5: 12884 12772 if ((bRm & X86_MODRM_MOD_MASK) != 0) 12885 12773 { 12886 u32EffAddr += p Ctx->ebp;12774 u32EffAddr += 
pVCpu->cpum.GstCtx.ebp; 12887 12775 SET_SS_DEF(); 12888 12776 } … … 12894 12782 } 12895 12783 break; 12896 case 6: u32EffAddr += p Ctx->esi; break;12897 case 7: u32EffAddr += p Ctx->edi; break;12784 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break; 12785 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break; 12898 12786 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 12899 12787 } 12900 12788 break; 12901 12789 } 12902 case 5: u32EffAddr = p Ctx->ebp; SET_SS_DEF(); break;12903 case 6: u32EffAddr = p Ctx->esi; break;12904 case 7: u32EffAddr = p Ctx->edi; break;12790 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break; 12791 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 12792 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 12905 12793 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 12906 12794 } … … 12945 12833 { 12946 12834 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 12947 u64EffAddr += p Ctx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;12835 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 12948 12836 } 12949 12837 else … … 12952 12840 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 12953 12841 { 12954 case 0: u64EffAddr = p Ctx->rax; break;12955 case 1: u64EffAddr = p Ctx->rcx; break;12956 case 2: u64EffAddr = p Ctx->rdx; break;12957 case 3: u64EffAddr = p Ctx->rbx; break;12958 case 5: u64EffAddr = p Ctx->rbp; SET_SS_DEF(); break;12959 case 6: u64EffAddr = p Ctx->rsi; break;12960 case 7: u64EffAddr = p Ctx->rdi; break;12961 case 8: u64EffAddr = p Ctx->r8; break;12962 case 9: u64EffAddr = p Ctx->r9; break;12963 case 10: u64EffAddr = p Ctx->r10; break;12964 case 11: u64EffAddr = p Ctx->r11; break;12965 case 13: u64EffAddr = p Ctx->r13; break;12966 case 14: u64EffAddr = p Ctx->r14; break;12967 case 15: u64EffAddr = p Ctx->r15; break;12842 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 12843 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 12844 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 12845 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 12846 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break; 12847 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 12848 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 12849 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 12850 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 12851 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 12852 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 12853 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 12854 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 12855 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 12968 12856 /* SIB */ 12969 12857 case 4: … … 12975 12863 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 12976 12864 { 12977 case 0: u64EffAddr = p Ctx->rax; break;12978 case 1: u64EffAddr = p Ctx->rcx; break;12979 case 2: u64EffAddr = p Ctx->rdx; break;12980 case 3: u64EffAddr = p Ctx->rbx; break;12865 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 12866 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 12867 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 12868 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 12981 12869 case 4: u64EffAddr = 0; /*none */ break; 12982 case 5: u64EffAddr = p Ctx->rbp; break;12983 case 6: u64EffAddr = p Ctx->rsi; break;12984 case 7: u64EffAddr = p Ctx->rdi; break;12985 case 8: u64EffAddr = p Ctx->r8; break;12986 case 9: u64EffAddr = p Ctx->r9; break;12987 case 10: u64EffAddr = p Ctx->r10; break;12988 case 11: u64EffAddr = p Ctx->r11; 
break;12989 case 12: u64EffAddr = p Ctx->r12; break;12990 case 13: u64EffAddr = p Ctx->r13; break;12991 case 14: u64EffAddr = p Ctx->r14; break;12992 case 15: u64EffAddr = p Ctx->r15; break;12870 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break; 12871 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 12872 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 12873 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 12874 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 12875 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 12876 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 12877 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break; 12878 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 12879 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 12880 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 12993 12881 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 12994 12882 } … … 12998 12886 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 12999 12887 { 13000 case 0: u64EffAddr += p Ctx->rax; break;13001 case 1: u64EffAddr += p Ctx->rcx; break;13002 case 2: u64EffAddr += p Ctx->rdx; break;13003 case 3: u64EffAddr += p Ctx->rbx; break;13004 case 4: u64EffAddr += p Ctx->rsp; SET_SS_DEF(); break;13005 case 6: u64EffAddr += p Ctx->rsi; break;13006 case 7: u64EffAddr += p Ctx->rdi; break;13007 case 8: u64EffAddr += p Ctx->r8; break;13008 case 9: u64EffAddr += p Ctx->r9; break;13009 case 10: u64EffAddr += p Ctx->r10; break;13010 case 11: u64EffAddr += p Ctx->r11; break;13011 case 12: u64EffAddr += p Ctx->r12; break;13012 case 14: u64EffAddr += p Ctx->r14; break;13013 case 15: u64EffAddr += p Ctx->r15; break;12888 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break; 12889 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break; 12890 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break; 12891 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break; 12892 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break; 12893 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break; 12894 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break; 12895 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break; 12896 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break; 12897 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break; 12898 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break; 12899 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break; 12900 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break; 12901 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break; 13014 12902 /* complicated encodings */ 13015 12903 case 5: … … 13019 12907 if (!pVCpu->iem.s.uRexB) 13020 12908 { 13021 u64EffAddr += p Ctx->rbp;12909 u64EffAddr += pVCpu->cpum.GstCtx.rbp; 13022 12910 SET_SS_DEF(); 13023 12911 } 13024 12912 else 13025 u64EffAddr += p Ctx->r13;12913 u64EffAddr += pVCpu->cpum.GstCtx.r13; 13026 12914 } 13027 12915 else … … 13094 12982 { 13095 12983 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm)); 13096 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);13097 12984 # define SET_SS_DEF() \ 13098 12985 do \ … … 13126 13013 switch (bRm & X86_MODRM_RM_MASK) 13127 13014 { 13128 case 0: u16EffAddr += p Ctx->bx + pCtx->si; break;13129 case 1: u16EffAddr += p Ctx->bx + pCtx->di; break;13130 case 2: u16EffAddr += p Ctx->bp + pCtx->si; SET_SS_DEF(); break;13131 case 3: u16EffAddr += p Ctx->bp + pCtx->di; SET_SS_DEF(); break;13132 case 4: u16EffAddr += p Ctx->si; break;13133 case 5: u16EffAddr += p Ctx->di; break;13134 case 6: u16EffAddr += p Ctx->bp; SET_SS_DEF(); break;13135 case 7: u16EffAddr += p Ctx->bx; break;13015 case 0: u16EffAddr += 
pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break; 13016 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break; 13017 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break; 13018 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break; 13019 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break; 13020 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break; 13021 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break; 13022 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break; 13136 13023 } 13137 13024 } … … 13152 13039 switch ((bRm & X86_MODRM_RM_MASK)) 13153 13040 { 13154 case 0: u32EffAddr = p Ctx->eax; break;13155 case 1: u32EffAddr = p Ctx->ecx; break;13156 case 2: u32EffAddr = p Ctx->edx; break;13157 case 3: u32EffAddr = p Ctx->ebx; break;13041 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 13042 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 13043 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 13044 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 13158 13045 case 4: /* SIB */ 13159 13046 { … … 13163 13050 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) 13164 13051 { 13165 case 0: u32EffAddr = p Ctx->eax; break;13166 case 1: u32EffAddr = p Ctx->ecx; break;13167 case 2: u32EffAddr = p Ctx->edx; break;13168 case 3: u32EffAddr = p Ctx->ebx; break;13052 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 13053 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 13054 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 13055 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 13169 13056 case 4: u32EffAddr = 0; /*none */ break; 13170 case 5: u32EffAddr = p Ctx->ebp; break;13171 case 6: u32EffAddr = p Ctx->esi; break;13172 case 7: u32EffAddr = p Ctx->edi; break;13057 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break; 13058 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 13059 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 13173 13060 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 13174 13061 } … … 13178 13065 switch (bSib & X86_SIB_BASE_MASK) 13179 13066 { 13180 case 0: u32EffAddr += p Ctx->eax; break;13181 case 1: u32EffAddr += p Ctx->ecx; break;13182 case 2: u32EffAddr += p Ctx->edx; break;13183 case 3: u32EffAddr += p Ctx->ebx; break;13067 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break; 13068 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break; 13069 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break; 13070 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break; 13184 13071 case 4: 13185 u32EffAddr += p Ctx->esp + offRsp;13072 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp; 13186 13073 SET_SS_DEF(); 13187 13074 break; … … 13189 13076 if ((bRm & X86_MODRM_MOD_MASK) != 0) 13190 13077 { 13191 u32EffAddr += p Ctx->ebp;13078 u32EffAddr += pVCpu->cpum.GstCtx.ebp; 13192 13079 SET_SS_DEF(); 13193 13080 } … … 13199 13086 } 13200 13087 break; 13201 case 6: u32EffAddr += p Ctx->esi; break;13202 case 7: u32EffAddr += p Ctx->edi; break;13088 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break; 13089 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break; 13203 13090 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 13204 13091 } 13205 13092 break; 13206 13093 } 13207 case 5: u32EffAddr = p Ctx->ebp; SET_SS_DEF(); break;13208 case 6: u32EffAddr = p Ctx->esi; break;13209 case 7: u32EffAddr = p Ctx->edi; break;13094 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break; 13095 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 13096 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 
13210 13097 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 13211 13098 } … … 13250 13137 { 13251 13138 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 13252 u64EffAddr += p Ctx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;13139 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 13253 13140 } 13254 13141 else … … 13257 13144 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 13258 13145 { 13259 case 0: u64EffAddr = p Ctx->rax; break;13260 case 1: u64EffAddr = p Ctx->rcx; break;13261 case 2: u64EffAddr = p Ctx->rdx; break;13262 case 3: u64EffAddr = p Ctx->rbx; break;13263 case 5: u64EffAddr = p Ctx->rbp; SET_SS_DEF(); break;13264 case 6: u64EffAddr = p Ctx->rsi; break;13265 case 7: u64EffAddr = p Ctx->rdi; break;13266 case 8: u64EffAddr = p Ctx->r8; break;13267 case 9: u64EffAddr = p Ctx->r9; break;13268 case 10: u64EffAddr = p Ctx->r10; break;13269 case 11: u64EffAddr = p Ctx->r11; break;13270 case 13: u64EffAddr = p Ctx->r13; break;13271 case 14: u64EffAddr = p Ctx->r14; break;13272 case 15: u64EffAddr = p Ctx->r15; break;13146 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 13147 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 13148 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 13149 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 13150 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break; 13151 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 13152 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 13153 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 13154 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 13155 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 13156 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 13157 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 13158 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 13159 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 13273 13160 /* SIB */ 13274 13161 case 4: … … 13280 13167 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 13281 13168 { 13282 case 0: u64EffAddr = p Ctx->rax; break;13283 case 1: u64EffAddr = p Ctx->rcx; break;13284 case 2: u64EffAddr = p Ctx->rdx; break;13285 case 3: u64EffAddr = p Ctx->rbx; break;13169 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 13170 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 13171 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 13172 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 13286 13173 case 4: u64EffAddr = 0; /*none */ break; 13287 case 5: u64EffAddr = p Ctx->rbp; break;13288 case 6: u64EffAddr = p Ctx->rsi; break;13289 case 7: u64EffAddr = p Ctx->rdi; break;13290 case 8: u64EffAddr = p Ctx->r8; break;13291 case 9: u64EffAddr = p Ctx->r9; break;13292 case 10: u64EffAddr = p Ctx->r10; break;13293 case 11: u64EffAddr = p Ctx->r11; break;13294 case 12: u64EffAddr = p Ctx->r12; break;13295 case 13: u64EffAddr = p Ctx->r13; break;13296 case 14: u64EffAddr = p Ctx->r14; break;13297 case 15: u64EffAddr = p Ctx->r15; break;13174 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break; 13175 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 13176 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 13177 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 13178 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 13179 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 13180 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 13181 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break; 13182 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 13183 case 14: u64EffAddr = 
pVCpu->cpum.GstCtx.r14; break; 13184 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 13298 13185 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 13299 13186 } … … 13303 13190 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 13304 13191 { 13305 case 0: u64EffAddr += p Ctx->rax; break;13306 case 1: u64EffAddr += p Ctx->rcx; break;13307 case 2: u64EffAddr += p Ctx->rdx; break;13308 case 3: u64EffAddr += p Ctx->rbx; break;13309 case 4: u64EffAddr += p Ctx->rsp + offRsp; SET_SS_DEF(); break;13310 case 6: u64EffAddr += p Ctx->rsi; break;13311 case 7: u64EffAddr += p Ctx->rdi; break;13312 case 8: u64EffAddr += p Ctx->r8; break;13313 case 9: u64EffAddr += p Ctx->r9; break;13314 case 10: u64EffAddr += p Ctx->r10; break;13315 case 11: u64EffAddr += p Ctx->r11; break;13316 case 12: u64EffAddr += p Ctx->r12; break;13317 case 14: u64EffAddr += p Ctx->r14; break;13318 case 15: u64EffAddr += p Ctx->r15; break;13192 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break; 13193 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break; 13194 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break; 13195 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break; 13196 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break; 13197 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break; 13198 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break; 13199 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break; 13200 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break; 13201 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break; 13202 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break; 13203 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break; 13204 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break; 13205 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break; 13319 13206 /* complicated encodings */ 13320 13207 case 5: … … 13324 13211 if (!pVCpu->iem.s.uRexB) 13325 13212 { 13326 u64EffAddr += p Ctx->rbp;13213 u64EffAddr += pVCpu->cpum.GstCtx.rbp; 13327 13214 SET_SS_DEF(); 13328 13215 } 13329 13216 else 13330 u64EffAddr += p Ctx->r13;13217 u64EffAddr += pVCpu->cpum.GstCtx.r13; 13331 13218 } 13332 13219 else … … 13400 13287 { 13401 13288 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm)); 13402 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);13403 13289 # define SET_SS_DEF() \ 13404 13290 do \ … … 13432 13318 switch (bRm & X86_MODRM_RM_MASK) 13433 13319 { 13434 case 0: u16EffAddr += p Ctx->bx + pCtx->si; break;13435 case 1: u16EffAddr += p Ctx->bx + pCtx->di; break;13436 case 2: u16EffAddr += p Ctx->bp + pCtx->si; SET_SS_DEF(); break;13437 case 3: u16EffAddr += p Ctx->bp + pCtx->di; SET_SS_DEF(); break;13438 case 4: u16EffAddr += p Ctx->si; break;13439 case 5: u16EffAddr += p Ctx->di; break;13440 case 6: u16EffAddr += p Ctx->bp; SET_SS_DEF(); break;13441 case 7: u16EffAddr += p Ctx->bx; break;13320 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break; 13321 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break; 13322 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break; 13323 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break; 13324 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break; 13325 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break; 13326 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break; 13327 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break; 13442 13328 } 13443 13329 } … … 13458 13344 switch ((bRm & X86_MODRM_RM_MASK)) 13459 13345 { 13460 case 0: u32EffAddr = p Ctx->eax; break;13461 case 1: 
u32EffAddr = p Ctx->ecx; break;13462 case 2: u32EffAddr = p Ctx->edx; break;13463 case 3: u32EffAddr = p Ctx->ebx; break;13346 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 13347 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 13348 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 13349 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 13464 13350 case 4: /* SIB */ 13465 13351 { … … 13469 13355 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) 13470 13356 { 13471 case 0: u32EffAddr = p Ctx->eax; break;13472 case 1: u32EffAddr = p Ctx->ecx; break;13473 case 2: u32EffAddr = p Ctx->edx; break;13474 case 3: u32EffAddr = p Ctx->ebx; break;13357 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 13358 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 13359 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 13360 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 13475 13361 case 4: u32EffAddr = 0; /*none */ break; 13476 case 5: u32EffAddr = p Ctx->ebp; break;13477 case 6: u32EffAddr = p Ctx->esi; break;13478 case 7: u32EffAddr = p Ctx->edi; break;13362 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break; 13363 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 13364 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 13479 13365 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); 13480 13366 } … … 13484 13370 switch (bSib & X86_SIB_BASE_MASK) 13485 13371 { 13486 case 0: u32EffAddr += p Ctx->eax; break;13487 case 1: u32EffAddr += p Ctx->ecx; break;13488 case 2: u32EffAddr += p Ctx->edx; break;13489 case 3: u32EffAddr += p Ctx->ebx; break;13490 case 4: u32EffAddr += p Ctx->esp; SET_SS_DEF(); break;13372 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break; 13373 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break; 13374 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break; 13375 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break; 13376 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break; 13491 13377 case 5: 13492 13378 if ((bRm & X86_MODRM_MOD_MASK) != 0) 13493 13379 { 13494 u32EffAddr += p Ctx->ebp;13380 u32EffAddr += pVCpu->cpum.GstCtx.ebp; 13495 13381 SET_SS_DEF(); 13496 13382 } … … 13502 13388 } 13503 13389 break; 13504 case 6: u32EffAddr += p Ctx->esi; break;13505 case 7: u32EffAddr += p Ctx->edi; break;13390 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break; 13391 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break; 13506 13392 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); 13507 13393 } 13508 13394 break; 13509 13395 } 13510 case 5: u32EffAddr = p Ctx->ebp; SET_SS_DEF(); break;13511 case 6: u32EffAddr = p Ctx->esi; break;13512 case 7: u32EffAddr = p Ctx->edi; break;13396 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break; 13397 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 13398 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 13513 13399 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); 13514 13400 } … … 13552 13438 { 13553 13439 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr); 13554 u64EffAddr += p Ctx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;13440 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm; 13555 13441 } 13556 13442 else … … 13559 13445 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB) 13560 13446 { 13561 case 0: u64EffAddr = p Ctx->rax; break;13562 case 1: u64EffAddr = p Ctx->rcx; break;13563 case 2: u64EffAddr = p Ctx->rdx; break;13564 case 3: u64EffAddr = p Ctx->rbx; break;13565 case 5: u64EffAddr = p Ctx->rbp; SET_SS_DEF(); break;13566 case 6: u64EffAddr = p Ctx->rsi; break;13567 case 7: 
u64EffAddr = p Ctx->rdi; break;13568 case 8: u64EffAddr = p Ctx->r8; break;13569 case 9: u64EffAddr = p Ctx->r9; break;13570 case 10: u64EffAddr = p Ctx->r10; break;13571 case 11: u64EffAddr = p Ctx->r11; break;13572 case 13: u64EffAddr = p Ctx->r13; break;13573 case 14: u64EffAddr = p Ctx->r14; break;13574 case 15: u64EffAddr = p Ctx->r15; break;13447 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 13448 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 13449 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 13450 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 13451 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break; 13452 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 13453 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 13454 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 13455 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 13456 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 13457 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 13458 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 13459 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 13460 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 13575 13461 /* SIB */ 13576 13462 case 4: … … 13582 13468 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex) 13583 13469 { 13584 case 0: u64EffAddr = p Ctx->rax; break;13585 case 1: u64EffAddr = p Ctx->rcx; break;13586 case 2: u64EffAddr = p Ctx->rdx; break;13587 case 3: u64EffAddr = p Ctx->rbx; break;13470 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 13471 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 13472 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 13473 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 13588 13474 case 4: u64EffAddr = 0; /*none */ break; 13589 case 5: u64EffAddr = p Ctx->rbp; break;13590 case 6: u64EffAddr = p Ctx->rsi; break;13591 case 7: u64EffAddr = p Ctx->rdi; break;13592 case 8: u64EffAddr = p Ctx->r8; break;13593 case 9: u64EffAddr = p Ctx->r9; break;13594 case 10: u64EffAddr = p Ctx->r10; break;13595 case 11: u64EffAddr = p Ctx->r11; break;13596 case 12: u64EffAddr = p Ctx->r12; break;13597 case 13: u64EffAddr = p Ctx->r13; break;13598 case 14: u64EffAddr = p Ctx->r14; break;13599 case 15: u64EffAddr = p Ctx->r15; break;13475 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break; 13476 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 13477 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 13478 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 13479 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 13480 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 13481 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 13482 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break; 13483 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 13484 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 13485 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 13600 13486 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); 13601 13487 } … … 13605 13491 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB) 13606 13492 { 13607 case 0: u64EffAddr += p Ctx->rax; break;13608 case 1: u64EffAddr += p Ctx->rcx; break;13609 case 2: u64EffAddr += p Ctx->rdx; break;13610 case 3: u64EffAddr += p Ctx->rbx; break;13611 case 4: u64EffAddr += p Ctx->rsp; SET_SS_DEF(); break;13612 case 6: u64EffAddr += p Ctx->rsi; break;13613 case 7: u64EffAddr += p Ctx->rdi; break;13614 case 8: u64EffAddr += p Ctx->r8; break;13615 case 9: u64EffAddr += p Ctx->r9; break;13616 case 10: 
u64EffAddr += p Ctx->r10; break;13617 case 11: u64EffAddr += p Ctx->r11; break;13618 case 12: u64EffAddr += p Ctx->r12; break;13619 case 14: u64EffAddr += p Ctx->r14; break;13620 case 15: u64EffAddr += p Ctx->r15; break;13493 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break; 13494 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break; 13495 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break; 13496 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break; 13497 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break; 13498 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break; 13499 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break; 13500 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break; 13501 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break; 13502 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break; 13503 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break; 13504 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break; 13505 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break; 13506 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break; 13621 13507 /* complicated encodings */ 13622 13508 case 5: … … 13626 13512 if (!pVCpu->iem.s.uRexB) 13627 13513 { 13628 u64EffAddr += p Ctx->rbp;13514 u64EffAddr += pVCpu->cpum.GstCtx.rbp; 13629 13515 SET_SS_DEF(); 13630 13516 } 13631 13517 else 13632 u64EffAddr += p Ctx->r13;13518 u64EffAddr += pVCpu->cpum.GstCtx.r13; 13633 13519 } 13634 13520 else … … 13696 13582 * Logs the current instruction. 13697 13583 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 13698 * @param pCtx The current CPU context.13699 13584 * @param fSameCtx Set if we have the same context information as the VMM, 13700 13585 * clear if we may have already executed an instruction in 13701 13586 * our debug context. When clear, we assume IEMCPU holds 13702 13587 * valid CPU mode info. 13703 */ 13704 IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx) 13588 * 13589 * The @a fSameCtx parameter is now misleading and obsolete. 
13590 */ 13591 IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx) 13705 13592 { 13706 13593 # ifdef IN_RING3 … … 13721 13608 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break; 13722 13609 case IEMMODE_16BIT: 13723 if (!(p Ctx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)13610 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM) 13724 13611 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE; 13725 13612 else … … 13727 13614 break; 13728 13615 } 13729 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, p Ctx->cs.Sel, pCtx->rip, fFlags,13616 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags, 13730 13617 szInstr, sizeof(szInstr), &cbInstr); 13731 13618 } 13732 13619 13733 PCX86FXSTATE pFpuCtx = &p Ctx->CTX_SUFF(pXState)->x87;13620 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 13734 13621 Log2(("****\n" 13735 13622 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n" … … 13739 13626 " %s\n" 13740 13627 , 13741 p Ctx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,13742 p Ctx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,13743 p Ctx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,13744 p Ctx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,13628 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi, 13629 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel, 13630 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel, 13631 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u, 13745 13632 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK, 13746 13633 szInstr)); … … 13752 13639 # endif 13753 13640 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", 13754 p Ctx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));13755 RT_NOREF_PV(pVCpu); RT_NOREF_PV( pCtx); RT_NOREF_PV(fSameCtx);13641 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u)); 13642 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx); 13756 13643 } 13757 13644 #endif /* LOG_ENABLED */ … … 13899 13786 { 13900 13787 #ifdef LOG_ENABLED 13901 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu),false);13788 iemLogCurInstr(pVCpu, false); 13902 13789 #endif 13903 13790 #ifdef IEM_WITH_SETJMP … … 13948 13835 * @returns rcStrict, maybe modified. 13949 13836 * @param pVCpu The cross context virtual CPU structure of the calling thread. 13950 * @param pCtx The current CPU context.13951 13837 * @param rcStrict The status code returne by the interpreter. 
13952 13838 */ 13953 DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx,VBOXSTRICTRC rcStrict)13839 DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict) 13954 13840 { 13955 13841 if ( !pVCpu->iem.s.fInPatchCode … … 13958 13844 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) ) 13959 13845 { 13960 if (p Ctx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)13846 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS) 13961 13847 CPUMRawEnter(pVCpu); 13962 13848 else … … 13980 13866 { 13981 13867 #ifdef LOG_ENABLED 13982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 13983 iemLogCurInstr(pVCpu, pCtx, true); 13868 iemLogCurInstr(pVCpu, true); 13984 13869 #endif 13985 13870 … … 13994 13879 13995 13880 #ifdef IN_RC 13996 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),rcStrict);13881 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 13997 13882 #endif 13998 13883 if (rcStrict != VINF_SUCCESS) 13999 13884 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", 14000 p Ctx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));13885 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 14001 13886 return rcStrict; 14002 13887 } … … 14020 13905 14021 13906 #ifdef IN_RC 14022 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx,rcStrict);13907 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14023 13908 #endif 14024 13909 return rcStrict; … … 14034 13919 VBOXSTRICTRC rcStrict; 14035 13920 if ( cbOpcodeBytes 14036 && p Ctx->rip == OpcodeBytesPC)13921 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC) 14037 13922 { 14038 13923 iemInitDecoder(pVCpu, false); … … 14057 13942 14058 13943 #ifdef IN_RC 14059 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx,rcStrict);13944 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14060 13945 #endif 14061 13946 return rcStrict; … … 14080 13965 14081 13966 #ifdef IN_RC 14082 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx,rcStrict);13967 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14083 13968 #endif 14084 13969 return rcStrict; … … 14094 13979 VBOXSTRICTRC rcStrict; 14095 13980 if ( cbOpcodeBytes 14096 && p Ctx->rip == OpcodeBytesPC)13981 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC) 14097 13982 { 14098 13983 iemInitDecoder(pVCpu, true); … … 14117 14002 14118 14003 #ifdef IN_RC 14119 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx,rcStrict);14004 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14120 14005 #endif 14121 14006 return rcStrict; … … 14146 14031 VBOXSTRICTRC rcStrict; 14147 14032 if ( cbOpcodeBytes 14148 && p Ctx->rip == OpcodeBytesPC)14033 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC) 14149 14034 { 14150 14035 iemInitDecoder(pVCpu, true); … … 14173 14058 14174 14059 #ifdef IN_RC 14175 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx,rcStrict);14060 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14176 14061 #endif 14177 14062 return rcStrict; … … 14186 14071 * See if there is an interrupt pending in TRPM, inject it if we can. 14187 14072 */ 14188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);14189 14190 14073 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? 
*/ 14191 14074 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) 14192 bool fIntrEnabled = p Ctx->hwvirt.fGif;14075 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif; 14193 14076 if (fIntrEnabled) 14194 14077 { 14195 if (CPUMIsGuestInSvmNestedHwVirtMode( pCtx))14196 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);14078 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 14079 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu)); 14197 14080 else 14198 fIntrEnabled = p Ctx->eflags.Bits.u1IF;14081 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF; 14199 14082 } 14200 14083 #else 14201 bool fIntrEnabled = p Ctx->eflags.Bits.u1IF;14084 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF; 14202 14085 #endif 14203 14086 if ( fIntrEnabled 14204 14087 && TRPMHasTrap(pVCpu) 14205 && EMGetInhibitInterruptsPC(pVCpu) != p Ctx->rip)14088 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip) 14206 14089 { 14207 14090 uint8_t u8TrapNo; … … 14239 14122 */ 14240 14123 #ifdef LOG_ENABLED 14241 iemLogCurInstr(pVCpu, pCtx,true);14124 iemLogCurInstr(pVCpu, true); 14242 14125 #endif 14243 14126 … … 14269 14152 if (RT_LIKELY( ( !fCpu 14270 14153 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)) 14271 && !p Ctx->rflags.Bits.u1IF) )14154 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) ) 14272 14155 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )) 14273 14156 { … … 14322 14205 */ 14323 14206 #ifdef IN_RC 14324 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),rcStrict);14207 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict); 14325 14208 #endif 14326 14209 if (rcStrict != VINF_SUCCESS) 14327 14210 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", 14328 p Ctx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));14211 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 14329 14212 if (pcInstructions) 14330 14213 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart; … … 14479 14362 else 14480 14363 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", 14481 p Ctx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));14364 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 14482 14365 return rcStrict; 14483 14366 } … … 14511 14394 iemUninitExec(pVCpu); 14512 14395 #ifdef IN_RC 14513 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), 14514 iemExecStatusCodeFiddling(pVCpu, rcStrict)); 14396 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict)); 14515 14397 #else 14516 14398 return iemExecStatusCodeFiddling(pVCpu, rcStrict); … … 15103 14985 VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) 15104 14986 { 15105 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);15106 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu),uExitCode, uExitInfo1, uExitInfo2);14987 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 14988 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2); 15107 14989 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15108 14990 } -
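The hunks above all apply one mechanical rewrite: IEM helpers stop taking a cached PCPUMCTX parameter (or fetching one through IEM_GET_CTX) and instead dereference the guest context embedded in the per-CPU structure, pVCpu->cpum.GstCtx. A minimal sketch of the before/after shape follows; the struct layouts are simplified stand-ins for illustration, not the real VMM definitions.

#include <stdint.h>

/* Simplified stand-ins for the real VMM types (illustration only). */
typedef struct CPUMCTX { uint64_t rsp; } CPUMCTX;
typedef CPUMCTX *PCPUMCTX;
typedef struct VMCPU { struct { CPUMCTX GstCtx; } cpum; } VMCPU;
typedef VMCPU *PVMCPU;

/* Old shape: a second pointer into the same structure is threaded through. */
static void iemCommitRspOld(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uNewRsp)
{
    (void)pVCpu; /* pCtx was always just a cached &pVCpu->cpum.GstCtx */
    pCtx->rsp = uNewRsp;
}

/* New shape: the context is reached through pVCpu itself, so the extra
   parameter and the per-call IEM_GET_CTX() lookup both disappear. */
static void iemCommitRspNew(PVMCPU pVCpu, uint64_t uNewRsp)
{
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}

int main(void)
{
    VMCPU VCpu = { { { 0 } } };
    iemCommitRspOld(&VCpu, &VCpu.cpum.GstCtx, 0x1000);
    iemCommitRspNew(&VCpu, 0x2000);
    return VCpu.cpum.GstCtx.rsp == 0x2000 ? 0 : 1;
}

Because pCtx was always derived from pVCpu, the two calling conventions are equivalent; dropping the redundant pointer shortens every prototype and call site touched above, which is why the diff is large but essentially behavior-neutral.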
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
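The first helper rewritten in this file, iemHlpCheckPortIOPermissionBitmap, consults the I/O permission bitmap that hangs off the 32/64-bit TSS: an access to port P with operand size N is allowed only if the N bitmap bits starting at bit P are all clear. As a reference for the hunk below, here is a self-contained sketch of that bit test; the flat byte array is a stand-in for guest memory at tr.u64Base + offIoBitmap, where the real code fetches the two bytes with iemMemFetchSysU16 and raises #GP(0) on a set bit instead of returning false.

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the TSS I/O bitmap test. Two consecutive bytes are read so a
   2- or 4-byte port access whose bits straddle a byte boundary is covered
   by a single check (the TSS is expected to provide one extra all-ones
   byte past the end of the bitmap for exactly this reason). */
static bool iemSketchIoPortPermitted(const uint8_t *pbIoBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    uint16_t bmBytes   = (uint16_t)(pbIoBitmap[u16Port / 8] | ((uint16_t)pbIoBitmap[u16Port / 8 + 1] << 8));
    uint16_t fPortMask = (uint16_t)((1u << cbOperand) - 1u);  /* 1, 3 or 15 ones */
    return ((bmBytes >> (u16Port & 7)) & fPortMask) == 0;     /* all clear => OK */
}

The bitmap walk only runs when CR0.PE is set and either CPL > IOPL or VM86 mode is active, which is exactly the gate iemHlpCheckPortIOPermission applies in the hunk below before calling it.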
r72494 r72496 31 31 * 32 32 * @param pVCpu The cross context virtual CPU structure of the calling thread. 33 * @param pCtx The register context.34 33 * @param u16Port The port number. 35 34 * @param cbOperand The operand size. 36 35 */ 37 static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, PCCPUMCTX pCtx,uint16_t u16Port, uint8_t cbOperand)36 static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbOperand) 38 37 { 39 38 /* The TSS bits we're interested in are the same on 386 and AMD64. */ … … 43 42 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64)); 44 43 45 IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx,CPUMCTX_EXTRN_TR);44 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR); 46 45 47 46 /* 48 47 * Check the TSS type, 16-bit TSSes doesn't have any I/O permission bitmap. 49 48 */ 50 Assert(!p Ctx->tr.Attr.n.u1DescType);51 if (RT_UNLIKELY( p Ctx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY52 && p Ctx->tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))49 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType); 50 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY 51 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL)) 53 52 { 54 53 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n", 55 u16Port, cbOperand, p Ctx->tr.Attr.n.u4Type, pCtx->tr.Attr.u));54 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u)); 56 55 return iemRaiseGeneralProtectionFault0(pVCpu); 57 56 } … … 62 61 uint16_t offBitmap; 63 62 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX, 64 p Ctx->tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap));63 pVCpu->cpum.GstCtx.tr.u64Base + RT_OFFSETOF(X86TSS64, offIoBitmap)); 65 64 if (rcStrict != VINF_SUCCESS) 66 65 { … … 77 76 /** @todo check if real CPUs ensures that offBitmap has a minimum value of 78 77 * for instance sizeof(X86TSS32). */ 79 if (offFirstBit + 1 > p Ctx->tr.u32Limit) /* the limit is inclusive */78 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */ 80 79 { 81 80 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n", 82 offFirstBit, p Ctx->tr.u32Limit));81 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit)); 83 82 return iemRaiseGeneralProtectionFault0(pVCpu); 84 83 } … … 91 90 * 2nd byte when it's not required. */ 92 91 uint16_t bmBytes = UINT16_MAX; 93 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, p Ctx->tr.u64Base + offFirstBit);92 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit); 94 93 if (rcStrict != VINF_SUCCESS) 95 94 { … … 122 121 * 123 122 * @param pVCpu The cross context virtual CPU structure of the calling thread. 124 * @param pCtx The register context.125 123 * @param u16Port The port number. 126 124 * @param cbOperand The operand size. 
127 125 */ 128 DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, PCCPUMCTX pCtx,uint16_t u16Port, uint8_t cbOperand)126 DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbOperand) 129 127 { 130 128 X86EFLAGS Efl; 131 Efl.u = IEMMISC_GET_EFL(pVCpu , pCtx);132 if ( (p Ctx->cr0 & X86_CR0_PE)129 Efl.u = IEMMISC_GET_EFL(pVCpu); 130 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) 133 131 && ( pVCpu->iem.s.uCpl > Efl.Bits.u2IOPL 134 132 || Efl.Bits.u1VM) ) 135 return iemHlpCheckPortIOPermissionBitmap(pVCpu, pCtx,u16Port, cbOperand);133 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand); 136 134 return VINF_SUCCESS; 137 135 } … … 182 180 static void iemHlpUpdateArithEFlagsU8(PVMCPU pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined) 183 181 { 184 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 185 186 uint32_t fEFlags = pCtx->eflags.u; 182 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u; 187 183 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags); 188 p Ctx->eflags.u &= ~(fToUpdate | fUndefined);189 p Ctx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;184 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined); 185 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags; 190 186 } 191 187 … … 201 197 static void iemHlpUpdateArithEFlagsU16(PVMCPU pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined) 202 198 { 203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 204 205 uint32_t fEFlags = pCtx->eflags.u; 199 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u; 206 200 iemAImpl_test_u16(&u16Result, u16Result, &fEFlags); 207 p Ctx->eflags.u &= ~(fToUpdate | fUndefined);208 p Ctx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;201 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined); 202 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags; 209 203 } 210 204 … … 225 219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)); 226 220 #endif 227 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_MASK);221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK); 228 222 229 223 if ( uCpl > pSReg->Attr.n.u2Dpl … … 256 250 IEM_CIMPL_DEF_0(iemCImpl_popa_16) 257 251 { 258 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 259 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx); 252 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu); 260 253 RTGCPTR GCPtrLast = GCPtrStart + 15; 261 254 VBOXSTRICTRC rcStrict; … … 270 263 /** @todo do popa boundary / wrap-around checks. 
*/ 271 264 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu) 272 && (p Ctx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */265 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */ 273 266 { 274 267 /* word-by-word */ 275 268 RTUINT64U TmpRsp; 276 TmpRsp.u = p Ctx->rsp;277 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->di, &TmpRsp);269 TmpRsp.u = pVCpu->cpum.GstCtx.rsp; 270 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp); 278 271 if (rcStrict == VINF_SUCCESS) 279 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->si, &TmpRsp);272 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp); 280 273 if (rcStrict == VINF_SUCCESS) 281 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->bp, &TmpRsp);274 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp); 282 275 if (rcStrict == VINF_SUCCESS) 283 276 { 284 iemRegAddToRspEx(pVCpu, pCtx,&TmpRsp, 2); /* sp */285 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->bx, &TmpRsp);277 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */ 278 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp); 286 279 } 287 280 if (rcStrict == VINF_SUCCESS) 288 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->dx, &TmpRsp);281 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp); 289 282 if (rcStrict == VINF_SUCCESS) 290 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->cx, &TmpRsp);283 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp); 291 284 if (rcStrict == VINF_SUCCESS) 292 rcStrict = iemMemStackPopU16Ex(pVCpu, &p Ctx->ax, &TmpRsp);285 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp); 293 286 if (rcStrict == VINF_SUCCESS) 294 287 { 295 p Ctx->rsp = TmpRsp.u;288 pVCpu->cpum.GstCtx.rsp = TmpRsp.u; 296 289 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 297 290 } … … 303 296 if (rcStrict == VINF_SUCCESS) 304 297 { 305 p Ctx->di = pa16Mem[7 - X86_GREG_xDI];306 p Ctx->si = pa16Mem[7 - X86_GREG_xSI];307 p Ctx->bp = pa16Mem[7 - X86_GREG_xBP];298 pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI]; 299 pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI]; 300 pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP]; 308 301 /* skip sp */ 309 p Ctx->bx = pa16Mem[7 - X86_GREG_xBX];310 p Ctx->dx = pa16Mem[7 - X86_GREG_xDX];311 p Ctx->cx = pa16Mem[7 - X86_GREG_xCX];312 p Ctx->ax = pa16Mem[7 - X86_GREG_xAX];302 pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX]; 303 pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX]; 304 pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX]; 305 pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX]; 313 306 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R); 314 307 if (rcStrict == VINF_SUCCESS) 315 308 { 316 iemRegAddToRsp(pVCpu, pCtx,16);309 iemRegAddToRsp(pVCpu, 16); 317 310 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 318 311 } … … 328 321 IEM_CIMPL_DEF_0(iemCImpl_popa_32) 329 322 { 330 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 331 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu, pCtx); 323 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu); 332 324 RTGCPTR GCPtrLast = GCPtrStart + 31; 333 325 VBOXSTRICTRC rcStrict; … … 342 334 /** @todo do popa boundary / wrap-around checks. 
*/ 343 335 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu) 344 && (p Ctx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */336 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */ 345 337 { 346 338 /* word-by-word */ 347 339 RTUINT64U TmpRsp; 348 TmpRsp.u = p Ctx->rsp;349 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->edi, &TmpRsp);340 TmpRsp.u = pVCpu->cpum.GstCtx.rsp; 341 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp); 350 342 if (rcStrict == VINF_SUCCESS) 351 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->esi, &TmpRsp);343 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp); 352 344 if (rcStrict == VINF_SUCCESS) 353 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->ebp, &TmpRsp);345 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp); 354 346 if (rcStrict == VINF_SUCCESS) 355 347 { 356 iemRegAddToRspEx(pVCpu, pCtx,&TmpRsp, 2); /* sp */357 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->ebx, &TmpRsp);348 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */ 349 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp); 358 350 } 359 351 if (rcStrict == VINF_SUCCESS) 360 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->edx, &TmpRsp);352 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp); 361 353 if (rcStrict == VINF_SUCCESS) 362 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->ecx, &TmpRsp);354 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp); 363 355 if (rcStrict == VINF_SUCCESS) 364 rcStrict = iemMemStackPopU32Ex(pVCpu, &p Ctx->eax, &TmpRsp);356 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp); 365 357 if (rcStrict == VINF_SUCCESS) 366 358 { 367 359 #if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? 
         if (rcStrict == VINF_SUCCESS)
         {
 #if 1  /** @todo what actually happens with the high bits when we're in 16-bit mode? */
-            pCtx->rdi &= UINT32_MAX;
+            pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
             … (rsi, rbp, rbx, rdx, rcx and rax are masked likewise) …
 #endif
-            pCtx->rsp = TmpRsp.u;
+            pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
         }
 …
-            pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
+            pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
             … (rsi and rbp likewise; esp is skipped; rbx, rdx, rcx and rax likewise) …
             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
             if (rcStrict == VINF_SUCCESS)
             {
-                iemRegAddToRsp(pVCpu, pCtx, 32);
+                iemRegAddToRsp(pVCpu, 32);
                 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
             }
 …
 IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    RTGCPTR  GCPtrTop    = iemRegGetEffRsp(pVCpu, pCtx);
+    RTGCPTR  GCPtrTop    = iemRegGetEffRsp(pVCpu);
     RTGCPTR  GCPtrBottom = GCPtrTop - 15;
     VBOXSTRICTRC rcStrict;
 …
         /* word-by-word */
         RTUINT64U TmpRsp;
-        TmpRsp.u = pCtx->rsp;
-        rcStrict = iemMemStackPushU16Ex(pVCpu, pCtx->ax, &TmpRsp);
+        TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+        rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
         … (cx, dx, bx, sp, bp, si and di are pushed the same way) …
         if (rcStrict == VINF_SUCCESS)
         {
-            pCtx->rsp = TmpRsp.u;
+            pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
         }
 …
         if (rcStrict == VINF_SUCCESS)
         {
-            pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
+            pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
             … (si, bp, sp, bx, dx, cx and ax are stored likewise) …
             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
             if (rcStrict == VINF_SUCCESS)
             {
-                iemRegSubFromRsp(pVCpu, pCtx, 16);
+                iemRegSubFromRsp(pVCpu, 16);
                 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
             }
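One detail worth keeping in mind when reading the pusha hunks: the SP slot that PUSHA writes holds the value SP had before the instruction started, which is why POPA later skips it. A minimal 16-bit model of the push sequence (plain C, none of the IEM helpers):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t au16Stack[64] = { 0 };
        uint16_t sp = 64 * 2;                  /* byte offset of the stack top */
        const uint16_t spBefore = sp;
        const uint16_t au16Regs[8] = { 0xA1A1, 0xC1C1, 0xD2D2, 0xB3B3,
                                       spBefore /* SP as of before PUSHA */,
                                       0xB5B5, 0x5151, 0xD1D1 };
        for (unsigned i = 0; i < 8; i++)       /* AX,CX,DX,BX,SP,BP,SI,DI */
        {
            sp -= 2;
            au16Stack[sp / 2] = au16Regs[i];
        }
        printf("SP slot holds %#x (pre-push SP), new SP=%#x\n",
               au16Stack[(spBefore - 10) / 2], sp);
        return 0;
    }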
 …
 IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    RTGCPTR  GCPtrTop    = iemRegGetEffRsp(pVCpu, pCtx);
+    RTGCPTR  GCPtrTop    = iemRegGetEffRsp(pVCpu);
     RTGCPTR  GCPtrBottom = GCPtrTop - 31;
     VBOXSTRICTRC rcStrict;
 …
         /* word-by-word */
         RTUINT64U TmpRsp;
-        TmpRsp.u = pCtx->rsp;
-        rcStrict = iemMemStackPushU32Ex(pVCpu, pCtx->eax, &TmpRsp);
+        TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+        rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
         … (ecx, edx, ebx, esp, ebp, esi and edi are pushed the same way, then rsp is committed from TmpRsp) …
         if (rcStrict == VINF_SUCCESS)
         {
-            pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
+            pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
             … (esi, ebp, esp, ebx, edx, ecx and eax are stored likewise) …
             rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
             if (rcStrict == VINF_SUCCESS)
             {
-                iemRegSubFromRsp(pVCpu, pCtx, 32);
+                iemRegSubFromRsp(pVCpu, 32);
                 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
             }
 …
 IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC rcStrict;
 …
-    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
+    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
     if (   (fEfl & X86_EFL_VM)
         && X86_EFL_GET_IOPL(fEfl) != 3 )
     {
-        Assert(pCtx->cr0 & X86_CR0_PE);
+        Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
         if (   enmEffOpSize != IEMMODE_16BIT
-            || !(pCtx->cr4 & X86_CR4_VME))
+            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
             return iemRaiseGeneralProtectionFault0(pVCpu);
         fEfl &= ~X86_EFL_IF;          /* (RF and VM are out of range) */
 …
 IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu, pCtx);
+    uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
     VBOXSTRICTRC rcStrict;
     uint32_t fEflNew;
 …
     else if (   enmEffOpSize == IEMMODE_16BIT
-             && (pCtx->cr4 & X86_CR4_VME) )
+             && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
     {
         uint16_t u16Value;
         RTUINT64U TmpRsp;
-        TmpRsp.u = pCtx->rsp;
+        TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
         rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
 …
         fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
-        pCtx->rsp = TmpRsp.u;
+        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
     }
 …
             if (   IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
-                && !(pCtx->cr0 & X86_CR0_PE) )
+                && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
                 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
             break;
 …
     Assert(fEflNew & RT_BIT_32(1));
-    IEMMISC_SET_EFL(pVCpu, pCtx, fEflNew);
+    IEMMISC_SET_EFL(pVCpu, fEflNew);
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
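POPF never simply overwrites EFLAGS: which bits may change depends on CPL and IOPL, and the merge above keeps the old value of every masked-out bit. A self-contained sketch of just the masking step; the writable-bit set is assembled by hand here from the individual architectural flags, whereas the source uses the X86_EFL_POPF_BITS define:

    #include <stdint.h>
    #include <stdio.h>

    #define EFL_IF    (1u << 9)
    #define EFL_IOPL  (3u << 12)
    /* CF,PF,AF,ZF,SF,TF,IF,DF,OF,IOPL,NT,AC,ID -- the most POPF may touch. */
    #define EFL_POPF_BITS ( 1u | (1u<<2) | (1u<<4) | (1u<<6) | (1u<<7) | (1u<<8) \
                          | EFL_IF | (1u<<10) | (1u<<11) | EFL_IOPL | (1u<<14)   \
                          | (1u<<18) | (1u<<21) )

    static uint32_t popfMergeEfl(uint32_t fOld, uint32_t fPopped, unsigned uCpl)
    {
        uint32_t fMask = EFL_POPF_BITS;
        if (uCpl != 0)
        {
            fMask &= ~EFL_IOPL;                       /* only ring 0 changes IOPL  */
            if (uCpl > ((fOld & EFL_IOPL) >> 12))
                fMask &= ~EFL_IF;                     /* ...or IF when CPL > IOPL  */
        }
        return (fOld & ~fMask) | (fPopped & fMask) | (1u << 1); /* bit 1 reads 1 */
    }

    int main(void)
    {
        /* Ring 3, IOPL 0: a popped value trying to raise IOPL leaves it at 0. */
        printf("%#x\n", popfMergeEfl(0x202u, 0x3202u, 3));
        return 0;
    }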
 IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    uint16_t uOldPC = pCtx->ip + cbInstr;
-    if (uNewPC > pCtx->cs.u32Limit)
+    uint16_t uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;
+    if (uNewPC > pVCpu->cpum.GstCtx.cs.u32Limit)
         return iemRaiseGeneralProtectionFault0(pVCpu);
 …
-    pCtx->rip = uNewPC;
-    pCtx->eflags.Bits.u1RF = 0;
+    pVCpu->cpum.GstCtx.rip = uNewPC;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
 …
 (iemCImpl_call_rel_16, iemCImpl_call_32 and iemCImpl_call_rel_32 get the same treatment: the
 PCPUMCTX local goes away and ip/eip, cs.u32Limit, rip and eflags.Bits.u1RF are reached through
 pVCpu->cpum.GstCtx; the CSAM hook in iemCImpl_call_32 now tests
 pVCpu->cpum.GstCtx.eflags.Bits.u1IF and cr0 and records pVCpu->cpum.GstCtx.eip.)
 …
 IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    uint64_t uOldPC = pCtx->rip + cbInstr;
+    uint64_t uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;
     if (!IEM_IS_CANONICAL(uNewPC))
         return iemRaiseGeneralProtectionFault0(pVCpu);
 …
-    pCtx->rip = uNewPC;
-    pCtx->eflags.Bits.u1RF = 0;
+    pVCpu->cpum.GstCtx.rip = uNewPC;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
 …
 (iemCImpl_call_rel_64 likewise; the canonical check on uOldPC + offDisp is unchanged.)
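The 64-bit call paths above reject non-canonical targets with #GP(0): bits 63..47 must be a sign extension of bit 47. A standalone equivalent of that check (the real IEM_IS_CANONICAL macro lives in the VMM headers):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static bool isCanonical(uint64_t uAddr)
    {
        /* Shift the 48 significant bits up, sign-extend back down, compare. */
        return ((int64_t)(uAddr << 16) >> 16) == (int64_t)uAddr;
    }

    int main(void)
    {
        printf("%d %d\n", isCanonical(UINT64_C(0x00007fffffffffff)),   /* 1 */
                          isCanonical(UINT64_C(0x0000800000000000)));  /* 0 */
        return 0;
    }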
 …
 (iemCImpl_BranchTaskSegment)
     RT_NOREF_PV(enmEffOpSize);
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
 …
-    uint32_t uNextEip = pCtx->eip + cbInstr;
-    return iemTaskSwitch(pVCpu, pCtx, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
+    uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
+    return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
                          uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
 (iemCImpl_BranchTaskGate is adjusted identically, passing uSelTss and &TssDesc.)
 …
 (iemCImpl_BranchCallGate, same-privilege commit)
-    pCtx->rip         = uNewRip;
-    pCtx->cs.Sel      = uNewCS & X86_SEL_MASK_OFF_RPL;
+    pVCpu->cpum.GstCtx.rip         = uNewRip;
+    pVCpu->cpum.GstCtx.cs.Sel      = uNewCS & X86_SEL_MASK_OFF_RPL;
     … (cs.Sel |= uCpl with its @todo on conforming segs, cs.ValidSel, cs.fFlags, cs.Attr.u, cs.u32Limit and cs.u64Base likewise) …
-    pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
+    pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
 …
     /*
      * Determine new SS:rSP from the TSS.
      */
-    Assert(!pCtx->tr.Attr.n.u1DescType);
+    Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
     /* Figure out where the new stack pointer is stored in the TSS. */
     if (!IEM_IS_LONG_MODE(pVCpu))
     {
-        if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
+        if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
         {
             offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
 … (the 286 TSS uses RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4, the 64-bit TSS
    RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0)) …
     /* Check against TSS limit. */
-    if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
+    if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
     {
-        Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
-        return iemRaiseTaskSwitchFaultBySelector(pVCpu, pCtx->tr.Sel);
+        Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
+        return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
     }
-    GCPtrTSS = pCtx->tr.u64Base + offNewStack;
+    GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
     rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
 …
 (reading uNewRsp/uNewSS out of the mapped TSS only swaps the tr.Attr.n.u4Type asserts over to
 pVCpu->cpum.GstCtx.tr; for the 64-bit TSS, SS will be a NULL selector, but that's valid.)
 …
     /* Remember the old SS:rSP and their linear address. */
-    uOldSS  = pCtx->ss.Sel;
-    uOldRsp = pCtx->ss.Attr.n.u1DefBig ? pCtx->rsp : pCtx->sp;
-    GCPtrParmWds = pCtx->ss.u64Base + uOldRsp;
+    uOldSS  = pVCpu->cpum.GstCtx.ss.Sel;
+    uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
+    GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
 …
     /* Commit new SS:rSP. */
-    pCtx->ss.Sel      = uNewSS;
-    pCtx->ss.ValidSel = uNewSS;
+    pVCpu->cpum.GstCtx.ss.Sel      = uNewSS;
+    pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
     … (ss.Attr.u, ss.u32Limit, ss.u64Base, ss.fFlags and rsp likewise) …
     pVCpu->iem.s.uCpl = uNewCSDpl;
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
 …
         /* Push the old CS:rIP. */
-        uPtrRet.pu32[0] = pCtx->eip + cbInstr;
-        uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
+        uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
+        uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
 … (the 16-bit gate pushes ip and cs.Sel, the 64-bit gate rip, cs.Sel, uOldRsp and uOldSS, all
    through pVCpu->cpum.GstCtx; the same-privilege gate paths push the same values) …
     /* Commit new CS:rIP. */
-    pCtx->rip    = uNewRip;
-    pCtx->cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
+    pVCpu->cpum.GstCtx.rip    = uNewRip;
+    pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
     … (cs.Sel |= uCpl, cs.ValidSel, cs.fFlags, cs.Attr.u, cs.u32Limit and cs.u64Base likewise) …
-    pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
+    pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
 …
-    pCtx->eflags.Bits.u1RF = 0;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
     /* Flush the prefetch buffer. */
 …
 (iemCImpl_BranchSysSel)
     Assert((uSel & X86_SEL_MASK_OFF_RPL));
-    IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_XCPT_MASK);
+    IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
 …
 IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     NOREF(cbInstr);
     Assert(offSeg <= UINT32_MAX);
 …
         Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
-        if (offSeg > pCtx->cs.u32Limit)
+        if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
         {
             Log(("iemCImpl_FarJmp: 16-bit limit\n"));
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
         if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
-            pCtx->rip = offSeg;
+            pVCpu->cpum.GstCtx.rip = offSeg;
         else
-            pCtx->rip = offSeg & UINT16_MAX;
-        pCtx->cs.Sel      = uSel;
-        pCtx->cs.ValidSel = uSel;
-        pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-        pCtx->cs.u64Base  = (uint32_t)uSel << 4;
-        pCtx->eflags.Bits.u1RF = 0;
+            pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
+        pVCpu->cpum.GstCtx.cs.Sel      = uSel;
+        pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
+        pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
+        pVCpu->cpum.GstCtx.cs.u64Base  = (uint32_t)uSel << 4;
+        pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
         return VINF_SUCCESS;
 …
     /* commit */
-    pCtx->rip    = offSeg;
-    pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
+    pVCpu->cpum.GstCtx.rip    = offSeg;
+    pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
     … (cs.Sel |= uCpl, cs.ValidSel, cs.fFlags, cs.Attr.u, cs.u32Limit, cs.u64Base,
        enmCpuMode = iemCalcCpuMode(pVCpu) and eflags.Bits.u1RF = 0 as above) …
     /** @todo check if the hidden bits are loaded correctly for 64-bit mode. */
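The real and v8086 mode branches above need no descriptor table at all: the new CS base is simply the selector shifted left four bits, which is exactly what the "u64Base = (uint32_t)uSel << 4" assignments encode. A sketch of the resulting linear address computation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t uSel   = 0xF000;
        uint16_t offSeg = 0xFFF0;                 /* e.g. the reset vector */
        uint32_t uLinear = ((uint32_t)uSel << 4) + offSeg;
        printf("%04x:%04x -> linear %#x\n", uSel, offSeg, uLinear); /* 0xffff0 */
        return 0;
    }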
 IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC rcStrict;
     uint64_t     uNewRsp;
 …
         if (enmEffOpSize == IEMMODE_16BIT)
         {
-            uPtrRet.pu16[0] = pCtx->ip + cbInstr;
-            uPtrRet.pu16[1] = pCtx->cs.Sel;
+            uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
+            uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
         }
         else
         {
-            uPtrRet.pu32[0] = pCtx->eip + cbInstr;
-            uPtrRet.pu16[2] = pCtx->cs.Sel;
+            uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
+            uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
         }
 …
         /* Branch. */
-        pCtx->rip    = offSeg;
-        pCtx->cs.Sel = uSel;
+        pVCpu->cpum.GstCtx.rip    = offSeg;
+        pVCpu->cpum.GstCtx.cs.Sel = uSel;
         … (cs.ValidSel, cs.fFlags, cs.u64Base = (uint32_t)uSel << 4 and eflags.Bits.u1RF = 0 likewise) …
         return VINF_SUCCESS;
 …
 (the protected-mode callf path pushes ip/eip/rip plus cs.Sel the same way -- with the existing
 testcase @todos on the high word/words of CS -- and commits rip, the cs fields,
 enmCpuMode = iemCalcCpuMode(pVCpu) and eflags.Bits.u1RF = 0 through pVCpu->cpum.GstCtx)
 …
 IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC rcStrict;
     RTCPTRUNION  uPtrFrame;
 …
         /** @todo Intel pseudo code only does the limit check for 16-bit
          *        operands, AMD does not make any distinction. What is right? */
-        if (uNewRip > pCtx->cs.u32Limit)
+        if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
         /* commit the operation. */
-        pCtx->rsp    = uNewRsp;
-        pCtx->rip    = uNewRip;
-        pCtx->cs.Sel = uNewCs;
+        pVCpu->cpum.GstCtx.rsp    = uNewRsp;
+        pVCpu->cpum.GstCtx.rip    = uNewRip;
+        pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
         … (cs.ValidSel, cs.fFlags, cs.u64Base = (uint32_t)uNewCs << 4 and u1RF = 0 likewise) …
         if (cbPop)
-            iemRegAddToRsp(pVCpu, pCtx, cbPop);
+            iemRegAddToRsp(pVCpu, cbPop);
         return VINF_SUCCESS;
 …
-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
 …
 (return to outer privilege level: the rip commit with its 16-bit-mask @todo, the cs and ss
 register blocks, enmCpuMode = iemCalcCpuMode(pVCpu), the 64-bit/legacy ss.u64Base choice and the
 u1DefBig-dependent sp/rsp = uNewOuterRsp update all switch to pVCpu->cpum.GstCtx, then)
     pVCpu->iem.s.uCpl = (uNewCs & X86_SEL_RPL);
-    iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
+    iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
     … (es, fs and gs likewise) …
     if (cbPop)
-        iemRegAddToRsp(pVCpu, pCtx, cbPop);
-    pCtx->eflags.Bits.u1RF = 0;
+        iemRegAddToRsp(pVCpu, cbPop);
+    pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
 …
 (return to same privilege level: the u1DefBig-dependent sp/rsp = uNewRsp update, the rip commit
 with its 16-bit-mask @todo, the cs fields, enmCpuMode = iemCalcCpuMode(pVCpu), cbPop via
 iemRegAddToRsp(pVCpu, cbPop) and u1RF = 0 are converted the same way)
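The iemHlpAdjustSelectorForNewCpl calls above enforce that a return to an outer ring does not keep data segments the new ring could not have loaded itself. A conceptual sketch under that assumption, with a toy register type; the real rules also account for conforming code segments and the hidden descriptor state, which are trimmed here:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct SELREG { uint16_t Sel; uint8_t uDpl; } SELREG;

    static void adjustSelectorForNewCpl(uint8_t uNewCpl, SELREG *pSReg)
    {
        if (pSReg->Sel != 0 && pSReg->uDpl < uNewCpl)
            pSReg->Sel = 0;                 /* force the NULL selector */
    }

    int main(void)
    {
        SELREG Ds = { 0x0010, 0 };          /* a ring-0 data selector */
        adjustSelectorForNewCpl(3, &Ds);    /* returning to ring 3    */
        printf("ds=%#x\n", Ds.Sel);         /* 0 */
        return 0;
    }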
 IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     NOREF(cbInstr);
 …
     RTUINT64U NewRip;
     RTUINT64U NewRsp;
-    NewRsp.u = pCtx->rsp;
+    NewRsp.u = pVCpu->cpum.GstCtx.rsp;
 …
     if (enmEffOpSize != IEMMODE_64BIT)
     {
-        if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
+        if (NewRip.DWords.dw0 > pVCpu->cpum.GstCtx.cs.u32Limit)
         {
-            Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
+            Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
         }
 …
     /* Apply cbPop */
     if (cbPop)
-        iemRegAddToRspEx(pVCpu, pCtx, &NewRsp, cbPop);
+        iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
     /* Commit it. */
-    pCtx->rip = NewRip.u;
-    pCtx->rsp = NewRsp.u;
-    pCtx->eflags.Bits.u1RF = 0;
+    pVCpu->cpum.GstCtx.rip = NewRip.u;
+    pVCpu->cpum.GstCtx.rsp = NewRsp.u;
+    pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
 …
 IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     /* Push RBP, saving the old value in TmpRbp. */
-    RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
-    RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
+    RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
+    RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
     RTUINT64U NewRbp;
     VBOXSTRICTRC rcStrict;
 …
         case IEMMODE_16BIT:
-            if (pCtx->ss.Attr.n.u1DefBig)
+            if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
                 TmpRbp.DWords.dw0 -= 2;
             else
 … (the 32-bit nesting case tests ss.Attr.n.u1DefBig the same way) …
     /* Recalc RSP. */
-    iemRegSubFromRspEx(pVCpu, pCtx, &NewRsp, cbFrame);
+    iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
     /** @todo Should probe write access at the new RSP according to AMD. */
     /* Commit it. */
-    pCtx->rbp = NewRbp.u;
-    pCtx->rsp = NewRsp.u;
+    pVCpu->cpum.GstCtx.rbp = NewRbp.u;
+    pVCpu->cpum.GstCtx.rsp = NewRsp.u;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
 …
 IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     /* Calculate the intermediate RSP from RBP and the stack attributes. */
     RTUINT64U NewRsp;
     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
-        NewRsp.u = pCtx->rbp;
-    else if (pCtx->ss.Attr.n.u1DefBig)
-        NewRsp.u = pCtx->ebp;
+        NewRsp.u = pVCpu->cpum.GstCtx.rbp;
+    else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
+        NewRsp.u = pVCpu->cpum.GstCtx.ebp;
     else
     {
         /** @todo Check that LEAVE actually preserve the high EBP bits. */
-        NewRsp.u = pCtx->rsp;
-        NewRsp.Words.w0 = pCtx->bp;
+        NewRsp.u = pVCpu->cpum.GstCtx.rsp;
+        NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
     }
 …
         case IEMMODE_16BIT:
-            NewRbp.u = pCtx->rbp;
+            NewRbp.u = pVCpu->cpum.GstCtx.rbp;
             rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
             break;
 …
     /* Commit it. */
-    pCtx->rbp = NewRbp.u;
-    pCtx->rsp = NewRsp.u;
+    pVCpu->cpum.GstCtx.rbp = NewRbp.u;
+    pVCpu->cpum.GstCtx.rsp = NewRsp.u;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
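For the common nesting level 0, ENTER cbFrame,0 is just "push rbp; mov rbp, rsp; sub rsp, cbFrame", and LEAVE is the inverse; the nesting-level frame-pointer copies handled above are the only extra complication. A minimal 64-bit sketch of the level-0 arithmetic (plain C, none of the RTUINT64U plumbing):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rsp = 0x8000, rbp = 0x9000;
        uint16_t cbFrame = 0x40;
        uint64_t u64Saved;        /* stand-in for the stack slot written */

        rsp -= 8;                 /* push old RBP          */
        u64Saved = rbp;
        rbp  = rsp;               /* new frame pointer     */
        rsp -= cbFrame;           /* reserve local storage */
        printf("rbp=%#llx rsp=%#llx saved=%#llx\n",
               (unsigned long long)rbp, (unsigned long long)rsp,
               (unsigned long long)u64Saved);
        return 0;
    }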
 IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     X86EFLAGS Efl;
-    Efl.u = IEMMISC_GET_EFL(pVCpu, pCtx);
+    Efl.u = IEMMISC_GET_EFL(pVCpu);
     NOREF(cbInstr);
 …
     if (   Efl.Bits.u1VM
         && Efl.Bits.u2IOPL != 3
-        && !(pCtx->cr4 & X86_CR4_VME))
+        && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
         return iemRaiseGeneralProtectionFault0(pVCpu);
 …
     /** @todo Only the AMD pseudo code check the limit here, what's right? */
-    if (uNewEip > pCtx->cs.u32Limit)
+    if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
         return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
 …
 (the DBGFTRACE_ENABLED trace line now reads cs.Sel and eip from pVCpu->cpum.GstCtx, then)
-    pCtx->rsp    = uNewRsp;
-    pCtx->rip    = uNewEip;
-    pCtx->cs.Sel = uNewCs;
+    pVCpu->cpum.GstCtx.rsp    = uNewRsp;
+    pVCpu->cpum.GstCtx.rip    = uNewEip;
+    pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
     … (cs.ValidSel, cs.fFlags and cs.u64Base = (uint32_t)uNewCs << 4 likewise) …
     /** @todo do we load attribs and limit as well? */
     Assert(uNewFlags & X86_EFL_1);
-    IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
+    IEMMISC_SET_EFL(pVCpu, uNewFlags);
 …
  * Implements iret for protected mode returning to V8086 mode.
  *
- * @param   pCtx        Pointer to the CPU context.
  * @param   uNewEip     The new EIP.
 …
-IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
-                uint32_t, uNewFlags, uint64_t, uNewRsp)
+IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
 {
     RT_NOREF_PV(cbInstr);
-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_MASK);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
 …
 (the trace and Log7 lines switch to pVCpu->cpum.GstCtx.cs.Sel / .eip, then)
-    IEMMISC_SET_EFL(pVCpu, pCtx, uNewFlags);
-    iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
+    IEMMISC_SET_EFL(pVCpu, uNewFlags);
+    iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
     … (ss, es, ds, fs and gs are loaded likewise) …
-    pCtx->rip = (uint16_t)uNewEip;
-    pCtx->rsp = uNewEsp; /** @todo check this out! */
+    pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
+    pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
     pVCpu->iem.s.uCpl = 3;
 …
 (iemCImpl_iret_prot_NestedTask)
     RTSEL uSelRet;
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
-    VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pCtx->tr.u64Base);
+    VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
 …
-    uint32_t uNextEip = pCtx->eip + cbInstr;
-    return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
+    uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
+    return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
                          0 /* uCr2 */, uSelRet, &TssDesc);
 …
 IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     NOREF(cbInstr);
 …
-    if (pCtx->eflags.Bits.u1NT)
+    if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
         return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
 …
         Assert(enmEffOpSize == IEMMODE_32BIT);
-        return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
+        return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
 …
     if (   pVCpu->iem.s.uCpl == 0
         && (   !EMIsRawRing1Enabled(pVM)
-            || pCtx->cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
+            || pVCpu->cpum.GstCtx.cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
     {
         Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
 …
 (return to outer privilege level)
     if (pVCpu->iem.s.uCpl == 0)
         fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
-    else if (pVCpu->iem.s.uCpl <= pCtx->eflags.Bits.u2IOPL)
+    else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
         fEFlagsMask |= X86_EFL_IF;
     if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
         fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
-    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu, pCtx);
+    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
     fEFlagsNew &= ~fEFlagsMask;
     fEFlagsNew |= uNewFlags & fEFlagsMask;
 …
-    IEMMISC_SET_EFL(pVCpu, pCtx, fEFlagsNew);
-    pCtx->rip = uNewEip;
+    IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
+    pVCpu->cpum.GstCtx.rip = uNewEip;
     … (the cs and ss register blocks, enmCpuMode = iemCalcCpuMode(pVCpu) and the u1DefBig-dependent
        sp/rsp = uNewESP update all switch to pVCpu->cpum.GstCtx) …
     pVCpu->iem.s.uCpl = uNewCs & X86_SEL_RPL;
-    iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
+    iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
     … (es, fs and gs likewise) …
     /* Done! */
*/ … … 3519 3485 3520 3486 X86EFLAGS NewEfl; 3521 NewEfl.u = IEMMISC_GET_EFL(pVCpu , pCtx);3487 NewEfl.u = IEMMISC_GET_EFL(pVCpu); 3522 3488 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF 3523 3489 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT; … … 3534 3500 #ifdef DBGFTRACE_ENABLED 3535 3501 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx", 3536 pVCpu->iem.s.uCpl, p Ctx->cs.Sel, pCtx->eip,3537 uNewCs, uNewEip, uNewFlags, p Ctx->ss.Sel, uNewRsp);3502 pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, 3503 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp); 3538 3504 #endif 3539 3505 3540 IEMMISC_SET_EFL(pVCpu, pCtx,NewEfl.u);3541 p Ctx->rip = uNewEip;3542 p Ctx->cs.Sel = uNewCs;3543 p Ctx->cs.ValidSel = uNewCs;3544 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;3545 p Ctx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);3546 p Ctx->cs.u32Limit = cbLimitCS;3547 p Ctx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);3548 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(p Ctx);3549 if (!p Ctx->ss.Attr.n.u1DefBig)3550 p Ctx->sp = (uint16_t)uNewRsp;3506 IEMMISC_SET_EFL(pVCpu, NewEfl.u); 3507 pVCpu->cpum.GstCtx.rip = uNewEip; 3508 pVCpu->cpum.GstCtx.cs.Sel = uNewCs; 3509 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs; 3510 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 3511 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 3512 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS; 3513 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 3514 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu); 3515 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig) 3516 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp; 3551 3517 else 3552 p Ctx->rsp = uNewRsp;3518 pVCpu->cpum.GstCtx.rsp = uNewRsp; 3553 3519 /* Done! */ 3554 3520 } … … 3572 3538 IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize) 3573 3539 { 3574 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);3575 3540 NOREF(cbInstr); 3576 3541 … … 3578 3543 * Nested task return is not supported in long mode. 
3579 3544 */ 3580 if (p Ctx->eflags.Bits.u1NT)3581 { 3582 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", p Ctx->eflags.u));3545 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT) 3546 { 3547 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u)); 3583 3548 return iemRaiseGeneralProtectionFault0(pVCpu); 3584 3549 } … … 3809 3774 if (pVCpu->iem.s.uCpl == 0) 3810 3775 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */ 3811 else if (pVCpu->iem.s.uCpl <= p Ctx->eflags.Bits.u2IOPL)3776 else if (pVCpu->iem.s.uCpl <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL) 3812 3777 fEFlagsMask |= X86_EFL_IF; 3813 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu , pCtx);3778 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu); 3814 3779 fEFlagsNew &= ~fEFlagsMask; 3815 3780 fEFlagsNew |= uNewFlags & fEFlagsMask; 3816 3781 #ifdef DBGFTRACE_ENABLED 3817 3782 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx", 3818 pVCpu->iem.s.uCpl, uNewCpl, p Ctx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);3783 pVCpu->iem.s.uCpl, uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp); 3819 3784 #endif 3820 3785 3821 IEMMISC_SET_EFL(pVCpu, pCtx,fEFlagsNew);3822 p Ctx->rip = uNewRip;3823 p Ctx->cs.Sel = uNewCs;3824 p Ctx->cs.ValidSel = uNewCs;3825 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;3826 p Ctx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);3827 p Ctx->cs.u32Limit = cbLimitCS;3828 p Ctx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);3829 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(p Ctx);3830 if (p Ctx->cs.Attr.n.u1Long || pCtx->cs.Attr.n.u1DefBig)3831 p Ctx->rsp = uNewRsp;3786 IEMMISC_SET_EFL(pVCpu, fEFlagsNew); 3787 pVCpu->cpum.GstCtx.rip = uNewRip; 3788 pVCpu->cpum.GstCtx.cs.Sel = uNewCs; 3789 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs; 3790 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 3791 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy); 3792 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS; 3793 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy); 3794 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu); 3795 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) 3796 pVCpu->cpum.GstCtx.rsp = uNewRsp; 3832 3797 else 3833 p Ctx->sp = (uint16_t)uNewRsp;3834 p Ctx->ss.Sel = uNewSs;3835 p Ctx->ss.ValidSel = uNewSs;3798 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp; 3799 pVCpu->cpum.GstCtx.ss.Sel = uNewSs; 3800 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs; 3836 3801 if (!(uNewSs & X86_SEL_MASK_OFF_RPL)) 3837 3802 { 3838 p Ctx->ss.fFlags = CPUMSELREG_FLAGS_VALID;3839 p Ctx->ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);3840 p Ctx->ss.u32Limit = UINT32_MAX;3841 p Ctx->ss.u64Base = 0;3803 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 3804 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT); 3805 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX; 3806 pVCpu->cpum.GstCtx.ss.u64Base = 0; 3842 3807 Log2(("iretq new SS: NULL\n")); 3843 3808 } 3844 3809 else 3845 3810 { 3846 p Ctx->ss.fFlags = CPUMSELREG_FLAGS_VALID;3847 p Ctx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);3848 p Ctx->ss.u32Limit = cbLimitSs;3849 p Ctx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);3850 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", p Ctx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));3811 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 3812 pVCpu->cpum.GstCtx.ss.Attr.u = 
X86DESC_GET_HID_ATTR(&DescSS.Legacy); 3813 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs; 3814 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy); 3815 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u)); 3851 3816 } 3852 3817 … … 3854 3819 { 3855 3820 pVCpu->iem.s.uCpl = uNewCpl; 3856 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &p Ctx->ds);3857 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &p Ctx->es);3858 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &p Ctx->fs);3859 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &p Ctx->gs);3821 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds); 3822 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es); 3823 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs); 3824 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs); 3860 3825 } 3861 3826 … … 3900 3865 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) 3901 3866 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize); 3902 IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu),CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);3867 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR); 3903 3868 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) 3904 3869 return IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize); … … 3914 3879 IEM_CIMPL_DEF_0(iemCImpl_syscall) 3915 3880 { 3916 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);3917 3918 3881 /* 3919 3882 * Check preconditions. … … 3923 3886 * hardware. 3924 3887 */ 3925 if (!(p Ctx->msrEFER & MSR_K6_EFER_SCE))3888 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE)) 3926 3889 { 3927 3890 Log(("syscall: Not enabled in EFER -> #UD\n")); 3928 3891 return iemRaiseUndefinedOpcode(pVCpu); 3929 3892 } 3930 if (!(p Ctx->cr0 & X86_CR0_PE))3893 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)) 3931 3894 { 3932 3895 Log(("syscall: Protected mode is required -> #GP(0)\n")); 3933 3896 return iemRaiseGeneralProtectionFault0(pVCpu); 3934 3897 } 3935 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx( pCtx))3898 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 3936 3899 { 3937 3900 Log(("syscall: Only available in long mode on intel -> #UD\n")); … … 3939 3902 } 3940 3903 3941 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_SYSCALL_MSRS);3904 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS); 3942 3905 3943 3906 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */ 3944 3907 /** @todo what about LDT selectors? Shouldn't matter, really. */ 3945 uint16_t uNewCs = (p Ctx->msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;3908 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL; 3946 3909 uint16_t uNewSs = uNewCs + 8; 3947 3910 if (uNewCs == 0 || uNewSs == 0) … … 3952 3915 3953 3916 /* Long mode and legacy mode differs. */ 3954 if (CPUMIsGuestInLongModeEx( pCtx))3955 { 3956 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? p Ctx->msrLSTAR : pCtx->msrCSTAR;3917 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 3918 { 3919 uint64_t uNewRip = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx. msrCSTAR; 3957 3920 3958 3921 /* This test isn't in the docs, but I'm not trusting the guys writing … … 3967 3930 * Commit it. 
3968 3931 */ 3969 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", p Ctx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));3970 p Ctx->rcx = pCtx->rip + cbInstr;3971 p Ctx->rip = uNewRip;3972 3973 p Ctx->rflags.u &= ~X86_EFL_RF;3974 p Ctx->r11 = pCtx->rflags.u;3975 p Ctx->rflags.u &= ~pCtx->msrSFMASK;3976 p Ctx->rflags.u |= X86_EFL_1;3977 3978 p Ctx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;3979 p Ctx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;3932 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip)); 3933 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr; 3934 pVCpu->cpum.GstCtx.rip = uNewRip; 3935 3936 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF; 3937 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u; 3938 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK; 3939 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1; 3940 3941 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC; 3942 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC; 3980 3943 } 3981 3944 else … … 3985 3948 */ 3986 3949 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", 3987 p Ctx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));3988 p Ctx->rcx = pCtx->eip + cbInstr;3989 p Ctx->rip = pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;3990 p Ctx->rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);3991 3992 p Ctx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;3993 p Ctx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;3994 } 3995 p Ctx->cs.Sel = uNewCs;3996 p Ctx->cs.ValidSel = uNewCs;3997 p Ctx->cs.u64Base = 0;3998 p Ctx->cs.u32Limit = UINT32_MAX;3999 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;4000 4001 p Ctx->ss.Sel = uNewSs;4002 p Ctx->ss.ValidSel = uNewSs;4003 p Ctx->ss.u64Base = 0;4004 p Ctx->ss.u32Limit = UINT32_MAX;4005 p Ctx->ss.fFlags = CPUMSELREG_FLAGS_VALID;3950 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK))); 3951 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr; 3952 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK; 3953 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF); 3954 3955 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC; 3956 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC; 3957 } 3958 pVCpu->cpum.GstCtx.cs.Sel = uNewCs; 3959 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs; 3960 pVCpu->cpum.GstCtx.cs.u64Base = 0; 3961 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX; 3962 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 3963 3964 pVCpu->cpum.GstCtx.ss.Sel = uNewSs; 3965 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs; 3966 pVCpu->cpum.GstCtx.ss.u64Base = 0; 3967 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX; 3968 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 4006 3969 4007 3970 /* Flush the prefetch buffer. 
*/ … … 4023 3986 { 4024 3987 RT_NOREF_PV(cbInstr); 4025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);4026 3988 4027 3989 /* … … 4032 3994 * hardware. 4033 3995 */ 4034 if (!(p Ctx->msrEFER & MSR_K6_EFER_SCE))3996 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE)) 4035 3997 { 4036 3998 Log(("sysret: Not enabled in EFER -> #UD\n")); 4037 3999 return iemRaiseUndefinedOpcode(pVCpu); 4038 4000 } 4039 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx( pCtx))4001 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 4040 4002 { 4041 4003 Log(("sysret: Only available in long mode on intel -> #UD\n")); 4042 4004 return iemRaiseUndefinedOpcode(pVCpu); 4043 4005 } 4044 if (!(p Ctx->cr0 & X86_CR0_PE))4006 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)) 4045 4007 { 4046 4008 Log(("sysret: Protected mode is required -> #GP(0)\n")); … … 4053 4015 } 4054 4016 4055 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_SYSCALL_MSRS);4017 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS); 4056 4018 4057 4019 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */ 4058 uint16_t uNewCs = (p Ctx->msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;4020 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL; 4059 4021 uint16_t uNewSs = uNewCs + 8; 4060 4022 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT) … … 4069 4031 * Commit it. 4070 4032 */ 4071 if (CPUMIsGuestInLongModeEx( pCtx))4033 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 4072 4034 { 4073 4035 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT) 4074 4036 { 4075 4037 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", 4076 p Ctx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->rcx, pCtx->r11));4038 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11)); 4077 4039 /* Note! We disregard intel manual regarding the RCX cananonical 4078 4040 check, ask intel+xen why AMD doesn't do it. */ 4079 p Ctx->rip = pCtx->rcx;4080 p Ctx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC4041 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx; 4042 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC 4081 4043 | (3 << X86DESCATTR_DPL_SHIFT); 4082 4044 } … … 4084 4046 { 4085 4047 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", 4086 p Ctx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, pCtx->ecx, pCtx->r11));4087 p Ctx->rip = pCtx->ecx;4088 p Ctx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC4048 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11)); 4049 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx; 4050 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC 4089 4051 | (3 << X86DESCATTR_DPL_SHIFT); 4090 4052 } 4091 4053 /** @todo testcase: See what kind of flags we can make SYSRET restore and 4092 4054 * what it really ignores. RF and VM are hinted at being zero, by AMD. 
*/ 4093 p Ctx->rflags.u = pCtx->r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);4094 p Ctx->rflags.u |= X86_EFL_1;4055 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP); 4056 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_1; 4095 4057 } 4096 4058 else 4097 4059 { 4098 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", p Ctx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));4099 p Ctx->rip = pCtx->rcx;4100 p Ctx->rflags.u |= X86_EFL_IF;4101 p Ctx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC4060 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx)); 4061 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx; 4062 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF; 4063 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC 4102 4064 | (3 << X86DESCATTR_DPL_SHIFT); 4103 4065 } 4104 p Ctx->cs.Sel = uNewCs | 3;4105 p Ctx->cs.ValidSel = uNewCs | 3;4106 p Ctx->cs.u64Base = 0;4107 p Ctx->cs.u32Limit = UINT32_MAX;4108 p Ctx->cs.fFlags = CPUMSELREG_FLAGS_VALID;4109 4110 p Ctx->ss.Sel = uNewSs | 3;4111 p Ctx->ss.ValidSel = uNewSs | 3;4112 p Ctx->ss.fFlags = CPUMSELREG_FLAGS_VALID;4066 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3; 4067 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3; 4068 pVCpu->cpum.GstCtx.cs.u64Base = 0; 4069 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX; 4070 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID; 4071 4072 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3; 4073 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3; 4074 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 4113 4075 /* The SS hidden bits remains unchanged says AMD. To that I say "Yeah, right!". */ 4114 p Ctx->ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);4076 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT); 4115 4077 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged 4116 4078 * on sysret. 
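
The 64-bit SYSRET commit above rebuilds RFLAGS from R11, keeping only the POPF-restorable bits plus VIF/VIP and forcing reserved bit 1 to one. A standalone illustration; the mask is written out numerically here and should be read as an approximation of X86_EFL_POPF_BITS | VIF | VIP, not the authoritative constant:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t r11    = 0x0000000000010246ULL; /* flags saved by SYSCALL, RF set */
        uint64_t fMask  = 0x00000000003c7fd5ULL; /* approx. POPF bits + VIF + VIP  */
        uint64_t rflags = (r11 & fMask) | 0x2;   /* bit 1 is reserved-one          */
        printf("rflags=%#llx\n", (unsigned long long)rflags);  /* RF dropped      */
        return 0;
    }
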
*/ … … 4135 4097 IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel) 4136 4098 { 4137 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4138 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 4099 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg)); 4139 4100 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg); 4140 4101 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg); … … 4324 4285 { 4325 4286 if (iSegReg == X86_SREG_SS) 4326 { 4327 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4328 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 4329 } 4287 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 4330 4288 } 4331 4289 return rcStrict; … … 4341 4299 IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize) 4342 4300 { 4343 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);4344 4301 VBOXSTRICTRC rcStrict; 4345 4302 … … 4348 4305 */ 4349 4306 RTUINT64U TmpRsp; 4350 TmpRsp.u = p Ctx->rsp;4307 TmpRsp.u = pVCpu->cpum.GstCtx.rsp; 4351 4308 switch (enmEffOpSize) 4352 4309 { … … 4385 4342 if (rcStrict == VINF_SUCCESS) 4386 4343 { 4387 p Ctx->rsp = TmpRsp.u;4344 pVCpu->cpum.GstCtx.rsp = TmpRsp.u; 4388 4345 if (iSegReg == X86_SREG_SS) 4389 EMSetInhibitInterruptsPC(pVCpu, p Ctx->rip);4346 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 4390 4347 } 4391 4348 return rcStrict; … … 4403 4360 IEMMODE, enmEffOpSize) 4404 4361 { 4405 /*PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);*/4406 VBOXSTRICTRC rcStrict;4407 4408 4362 /* 4409 4363 * Use iemCImpl_LoadSReg to do the tricky segment register loading. … … 4411 4365 /** @todo verify and test that mov, pop and lXs works the segment 4412 4366 * register loading in the exact same way. */ 4413 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);4367 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel); 4414 4368 if (rcStrict == VINF_SUCCESS) 4415 4369 { … … 4454 4408 4455 4409 /* Within the table limits? 
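
The descriptor fetch that follows first checks the selector against the GDT (or LDT) limit and then reads the 8-byte entry at base plus the selector index. A simplified standalone version of that check and address calculation, with example base and limit values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t uSel  = 0x0018;                 /* example selector, TI=0     */
        uint16_t cbGdt = 0x003f;                 /* example GDTR limit         */
        uint64_t pGdt  = 0x00000000fffe0000ULL;  /* example GDTR base          */
        if ((uint16_t)(uSel | 7) > cbGdt)        /* last descriptor byte past the limit? */
            puts("selector outside table");
        else
            printf("descriptor at %#llx\n",
                   (unsigned long long)(pGdt + (uSel & 0xfff8)));
        return 0;
    }
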
*/ 4456 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);4457 4410 RTGCPTR GCPtrBase; 4458 4411 if (uSel & X86_SEL_LDT) 4459 4412 { 4460 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_LDTR);4461 if ( !p Ctx->ldtr.Attr.n.u1Present4462 || (uSel | X86_SEL_RPL_LDT) > p Ctx->ldtr.u32Limit )4413 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR); 4414 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present 4415 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit ) 4463 4416 return VINF_IEM_SELECTOR_NOT_OK; 4464 GCPtrBase = p Ctx->ldtr.u64Base;4417 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base; 4465 4418 } 4466 4419 else 4467 4420 { 4468 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_GDTR);4469 if ((uSel | X86_SEL_RPL_LDT) > p Ctx->gdtr.cbGdt)4421 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR); 4422 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt) 4470 4423 return VINF_IEM_SELECTOR_NOT_OK; 4471 GCPtrBase = p Ctx->gdtr.pGdt;4424 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt; 4472 4425 } 4473 4426 … … 4480 4433 if (!fAllowSysDesc) 4481 4434 return VINF_IEM_SELECTOR_NOT_OK; 4482 if (CPUMIsGuestInLongModeEx( pCtx))4435 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 4483 4436 { 4484 4437 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); … … 4731 4684 } 4732 4685 4733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4734 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR); 4735 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst); 4686 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR); 4687 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst); 4736 4688 if (rcStrict == VINF_SUCCESS) 4737 4689 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 4804 4756 } 4805 4757 4806 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4807 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_IDTR); 4808 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst); 4758 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR); 4759 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst); 4809 4760 if (rcStrict == VINF_SUCCESS) 4810 4761 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 4820 4771 IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt) 4821 4772 { 4822 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);4823 4824 4773 /* 4825 4774 * Check preconditions. … … 4855 4804 4856 4805 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt)); 4857 p Ctx->fExtrn &= ~CPUMCTX_EXTRN_LDTR;4806 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR; 4858 4807 CPUMSetGuestLDTR(pVCpu, uNewLdt); 4859 p Ctx->ldtr.ValidSel = uNewLdt;4860 p Ctx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;4808 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt; 4809 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 4861 4810 if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 4862 4811 { 4863 4812 /* AMD-V seems to leave the base and limit alone. */ 4864 p Ctx->ldtr.Attr.u = X86DESCATTR_UNUSABLE;4813 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE; 4865 4814 } 4866 4815 else 4867 4816 { 4868 4817 /* VT-x (Intel 3960x) seems to be doing the following. 
*/ 4869 p Ctx->ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;4870 p Ctx->ldtr.u64Base = 0;4871 p Ctx->ldtr.u32Limit = UINT32_MAX;4818 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D; 4819 pVCpu->cpum.GstCtx.ldtr.u64Base = 0; 4820 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX; 4872 4821 } 4873 4822 … … 4879 4828 * Read the descriptor. 4880 4829 */ 4881 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);4830 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR); 4882 4831 IEMSELDESC Desc; 4883 4832 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */ … … 4935 4884 /** @todo check if the actual value is loaded or if the RPL is dropped */ 4936 4885 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4937 p Ctx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;4938 p Ctx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;4939 p Ctx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);4940 p Ctx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);4941 p Ctx->ldtr.u64Base = u64Base;4886 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL; 4887 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 4888 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 4889 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); 4890 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base; 4942 4891 4943 4892 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 4953 4902 IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr) 4954 4903 { 4955 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);4956 4957 4904 /* 4958 4905 * Check preconditions. … … 4988 4935 * Read the descriptor. 4989 4936 */ 4990 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);4937 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR); 4991 4938 IEMSELDESC Desc; 4992 4939 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? 
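
The LDTR and TR commits in these hunks follow one hidden-register pattern: selector, ValidSel, validity flag, attributes, limit and base are filled as a unit from the fetched descriptor. Sketched below with reduced stand-in types; SELREG and SELREG_FLAGS_VALID are illustrative names, not the CPUM definitions:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct SELREG {                 /* reduced stand-in, not CPUMSELREG */
        uint16_t Sel, ValidSel;
        uint32_t fFlags, Attr, u32Limit;
        uint64_t u64Base;
    } SELREG;

    #define SELREG_FLAGS_VALID 1u           /* stand-in for CPUMSELREG_FLAGS_VALID */

    static void commitSelReg(SELREG *pReg, uint16_t uSel, uint32_t uAttr,
                             uint32_t cbLimit, uint64_t uBase)
    {
        pReg->Sel      = (uint16_t)(uSel & 0xfff8); /* RPL dropped, per the @todo */
        pReg->ValidSel = pReg->Sel;
        pReg->fFlags   = SELREG_FLAGS_VALID;
        pReg->Attr     = uAttr;
        pReg->u32Limit = cbLimit;
        pReg->u64Base  = uBase;
    }

    int main(void)
    {
        SELREG Ldtr = { 0, 0, 0, 0, 0, 0 };
        commitSelReg(&Ldtr, 0x002b, 0x8082, 0xffff, 0);   /* invented values */
        printf("ldtr=%#x valid=%u\n", (unsigned)Ldtr.Sel, (unsigned)Ldtr.fFlags);
        return 0;
    }
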
*/ … … 5040 4987 */ 5041 4988 void *pvDesc; 5042 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, p Ctx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);4989 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW); 5043 4990 if (rcStrict != VINF_SUCCESS) 5044 4991 return rcStrict; … … 5060 5007 /** @todo check if the actual value is loaded or if the RPL is dropped */ 5061 5008 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 5062 p Ctx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;5063 p Ctx->tr.fFlags = CPUMSELREG_FLAGS_VALID;5064 p Ctx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);5065 p Ctx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);5066 p Ctx->tr.u64Base = u64Base;5009 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL; 5010 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID; 5011 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy); 5012 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy); 5013 pVCpu->cpum.GstCtx.tr.u64Base = u64Base; 5067 5014 5068 5015 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 5079 5026 IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg) 5080 5027 { 5081 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5082 5028 if (pVCpu->iem.s.uCpl != 0) 5083 5029 return iemRaiseGeneralProtectionFault0(pVCpu); 5084 Assert(!p Ctx->eflags.Bits.u1VM);5030 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM); 5085 5031 5086 5032 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(pVCpu, iCrReg)) … … 5096 5042 { 5097 5043 case 0: 5098 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0);5099 crX = p Ctx->cr0;5044 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 5045 crX = pVCpu->cpum.GstCtx.cr0; 5100 5046 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386) 5101 5047 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */ 5102 5048 break; 5103 5049 case 2: 5104 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_CR2);5105 crX = p Ctx->cr2;5050 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2); 5051 crX = pVCpu->cpum.GstCtx.cr2; 5106 5052 break; 5107 5053 case 3: 5108 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR3);5109 crX = p Ctx->cr3;5054 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3); 5055 crX = pVCpu->cpum.GstCtx.cr3; 5110 5056 break; 5111 5057 case 4: 5112 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR4);5113 crX = p Ctx->cr4;5058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 5059 crX = pVCpu->cpum.GstCtx.cr4; 5114 5060 break; 5115 5061 case 8: 5116 5062 { 5117 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_APIC_TPR);5063 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR); 5118 5064 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5119 if (CPUMIsGuestInSvmNestedHwVirtMode( pCtx))5120 { 5121 PCSVMVMCBCTRL pVmcbCtrl = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;5122 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))5065 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 5066 { 5067 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 5068 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu))) 5123 5069 { 5124 5070 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf; … … 5160 5106 IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg) 5161 5107 { 5162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5163 5108 VBOXSTRICTRC rcStrict; 5164 5109 int rc; … … 5178 5123 * Perform checks. 
5179 5124 */ 5180 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0);5181 p Ctx->fExtrn &= ~CPUMCTX_EXTRN_LDTR;5182 5183 uint64_t const uOldCrX = p Ctx->cr0;5125 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 5126 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR; 5127 5128 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0; 5184 5129 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS 5185 5130 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM … … 5221 5166 5222 5167 if ( !(uNewCrX & X86_CR0_PG) 5223 && (p Ctx->cr4 & X86_CR4_PCIDE))5168 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)) 5224 5169 { 5225 5170 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n")); … … 5230 5175 if ( (uNewCrX & X86_CR0_PG) 5231 5176 && !(uOldCrX & X86_CR0_PG) 5232 && (p Ctx->msrEFER & MSR_K6_EFER_LME) )5233 { 5234 if (!(p Ctx->cr4 & X86_CR4_PAE))5177 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) ) 5178 { 5179 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)) 5235 5180 { 5236 5181 Log(("Trying to enabled long mode paging without CR4.PAE set\n")); 5237 5182 return iemRaiseGeneralProtectionFault0(pVCpu); 5238 5183 } 5239 if (p Ctx->cs.Attr.n.u1Long)5184 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long) 5240 5185 { 5241 5186 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n")); … … 5272 5217 */ 5273 5218 CPUMSetGuestCR0(pVCpu, uNewCrX); 5274 Assert(p Ctx->cr0 == uNewCrX);5219 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX); 5275 5220 5276 5221 /* … … 5278 5223 */ 5279 5224 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG) 5280 && (p Ctx->msrEFER & MSR_K6_EFER_LME) )5281 { 5282 uint64_t NewEFER = p Ctx->msrEFER;5225 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) ) 5226 { 5227 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER; 5283 5228 if (uNewCrX & X86_CR0_PG) 5284 5229 NewEFER |= MSR_K6_EFER_LMA; … … 5287 5232 5288 5233 CPUMSetGuestEFER(pVCpu, NewEFER); 5289 Assert(p Ctx->msrEFER == NewEFER);5234 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER); 5290 5235 } 5291 5236 … … 5296 5241 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5297 5242 { 5298 rc = PGMFlushTLB(pVCpu, p Ctx->cr3, true /* global */);5243 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */); 5299 5244 AssertRCReturn(rc, rc); 5300 5245 /* ignore informational status codes */ 5301 5246 } 5302 rcStrict = PGMChangeMode(pVCpu, p Ctx->cr0, pCtx->cr4, pCtx->msrEFER);5247 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER); 5303 5248 5304 5249 #ifdef IN_RC … … 5323 5268 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg); 5324 5269 } 5325 p Ctx->cr2 = uNewCrX;5326 p Ctx->fExtrn &= ~CPUMCTX_EXTRN_CR2;5270 pVCpu->cpum.GstCtx.cr2 = uNewCrX; 5271 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2; 5327 5272 rcStrict = VINF_SUCCESS; 5328 5273 break; … … 5340 5285 case 3: 5341 5286 { 5342 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR3);5287 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3); 5343 5288 5344 5289 /* clear bit 63 from the source operand and indicate no invalidations are required. */ 5345 if ( (p Ctx->cr4 & X86_CR4_PCIDE)5290 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE) 5346 5291 && (uNewCrX & RT_BIT_64(63))) 5347 5292 { … … 5361 5306 5362 5307 uint64_t fValid; 5363 if ( (p Ctx->cr4 & X86_CR4_PAE)5364 && (p Ctx->msrEFER & MSR_K6_EFER_LME))5308 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) 5309 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME)) 5365 5310 fValid = UINT64_C(0x000fffffffffffff); 5366 5311 else … … 5388 5333 5389 5334 /* Inform PGM. 
*/ 5390 if (p Ctx->cr0 & X86_CR0_PG)5391 { 5392 rc = PGMFlushTLB(pVCpu, p Ctx->cr3, !(pCtx->cr4 & X86_CR4_PGE));5335 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) 5336 { 5337 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE)); 5393 5338 AssertRCReturn(rc, rc); 5394 5339 /* ignore informational status codes */ … … 5404 5349 case 4: 5405 5350 { 5406 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR4);5407 uint64_t const uOldCrX = p Ctx->cr4;5351 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 5352 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4; 5408 5353 5409 5354 /** @todo Shouldn't this look at the guest CPUID bits to determine … … 5432 5377 5433 5378 bool const fPcide = ((uNewCrX ^ uOldCrX) & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE); 5434 bool const fLongMode = CPUMIsGuestInLongModeEx( pCtx);5379 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)); 5435 5380 5436 5381 /* PCIDE check. */ 5437 5382 if ( fPcide 5438 5383 && ( !fLongMode 5439 || (p Ctx->cr3 & UINT64_C(0xfff))))5440 { 5441 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (p Ctx->cr3 & UINT64_C(0xfff))));5384 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff)))) 5385 { 5386 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff)))); 5442 5387 return iemRaiseGeneralProtectionFault0(pVCpu); 5443 5388 } … … 5464 5409 rc = CPUMSetGuestCR4(pVCpu, uNewCrX); 5465 5410 AssertRCSuccessReturn(rc, rc); 5466 Assert(p Ctx->cr4 == uNewCrX);5411 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX); 5467 5412 5468 5413 /* … … 5483 5428 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */)) 5484 5429 { 5485 rc = PGMFlushTLB(pVCpu, p Ctx->cr3, true /* global */);5430 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */); 5486 5431 AssertRCReturn(rc, rc); 5487 5432 /* ignore informational status codes */ 5488 5433 } 5489 rcStrict = PGMChangeMode(pVCpu, p Ctx->cr0, pCtx->cr4, pCtx->msrEFER);5434 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER); 5490 5435 break; 5491 5436 } … … 5496 5441 case 8: 5497 5442 { 5498 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_APIC_TPR);5443 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR); 5499 5444 if (uNewCrX & ~(uint64_t)0xf) 5500 5445 { … … 5504 5449 5505 5450 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 5506 if (CPUMIsGuestInSvmNestedHwVirtMode( pCtx))5451 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 5507 5452 { 5508 5453 if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8)) … … 5513 5458 } 5514 5459 5515 PSVMVMCBCTRL pVmcbCtrl = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;5460 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 5516 5461 pVmcbCtrl->IntCtrl.n.u8VTPR = uNewCrX; 5517 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))5462 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu))) 5518 5463 { 5519 5464 rcStrict = VINF_SUCCESS; … … 5576 5521 IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw) 5577 5522 { 5578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5579 5580 5523 if (pVCpu->iem.s.uCpl != 0) 5581 5524 return iemRaiseGeneralProtectionFault0(pVCpu); 5582 Assert(!p Ctx->eflags.Bits.u1VM);5525 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM); 5583 5526 5584 5527 /* 5585 5528 * Compose the new CR0 value and call common worker. 
5586 5529 */ 5587 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0);5588 uint64_t uNewCr0 = p Ctx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);5530 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 5531 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 5589 5532 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 5590 5533 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */); … … 5600 5543 return iemRaiseGeneralProtectionFault0(pVCpu); 5601 5544 5602 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 5603 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0); 5604 uint64_t uNewCr0 = pCtx->cr0; 5545 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 5546 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0; 5605 5547 uNewCr0 &= ~X86_CR0_TS; 5606 5548 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */); … … 5616 5558 IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg) 5617 5559 { 5618 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5619 5620 5560 /* 5621 5561 * Check preconditions. … … 5625 5565 if (pVCpu->iem.s.uCpl != 0) 5626 5566 return iemRaiseGeneralProtectionFault0(pVCpu); 5627 Assert(!p Ctx->eflags.Bits.u1VM);5628 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0);5567 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM); 5568 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR0); 5629 5569 5630 5570 if ( (iDrReg == 4 || iDrReg == 5) 5631 && (p Ctx->cr4 & X86_CR4_DE) )5571 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE) ) 5632 5572 { 5633 5573 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg)); … … 5636 5576 5637 5577 /* Raise #DB if general access detect is enabled. */ 5638 if (p Ctx->dr[7] & X86_DR7_GD)5578 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD) 5639 5579 { 5640 5580 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg)); … … 5649 5589 { 5650 5590 case 0: 5651 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3);5652 drX = p Ctx->dr[0];5591 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3); 5592 drX = pVCpu->cpum.GstCtx.dr[0]; 5653 5593 break; 5654 5594 case 1: 5655 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3);5656 drX = p Ctx->dr[1];5595 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3); 5596 drX = pVCpu->cpum.GstCtx.dr[1]; 5657 5597 break; 5658 5598 case 2: 5659 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3);5660 drX = p Ctx->dr[2];5599 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3); 5600 drX = pVCpu->cpum.GstCtx.dr[2]; 5661 5601 break; 5662 5602 case 3: 5663 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3);5664 drX = p Ctx->dr[3];5603 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3); 5604 drX = pVCpu->cpum.GstCtx.dr[3]; 5665 5605 break; 5666 5606 case 6: 5667 5607 case 4: 5668 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR6);5669 drX = p Ctx->dr[6];5608 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6); 5609 drX = pVCpu->cpum.GstCtx.dr[6]; 5670 5610 drX |= X86_DR6_RA1_MASK; 5671 5611 drX &= ~X86_DR6_RAZ_MASK; … … 5673 5613 case 7: 5674 5614 case 5: 5675 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_DR7);5676 drX = p Ctx->dr[7];5615 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7); 5616 drX = pVCpu->cpum.GstCtx.dr[7]; 5677 5617 drX |=X86_DR7_RA1_MASK; 5678 5618 drX &= ~X86_DR7_RAZ_MASK; … … 5711 5651 IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg) 5712 5652 { 5713 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5714 5715 5653 /* 5716 5654 * Check preconditions. 
… … 5718 5656 if (pVCpu->iem.s.uCpl != 0) 5719 5657 return iemRaiseGeneralProtectionFault0(pVCpu); 5720 Assert(!p Ctx->eflags.Bits.u1VM);5721 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4);5658 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM); 5659 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_CR4); 5722 5660 5723 5661 if (iDrReg == 4 || iDrReg == 5) 5724 5662 { 5725 if (p Ctx->cr4 & X86_CR4_DE)5663 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE) 5726 5664 { 5727 5665 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg)); … … 5734 5672 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6 5735 5673 * \#GP? */ 5736 if (p Ctx->dr[7] & X86_DR7_GD)5674 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD) 5737 5675 { 5738 5676 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg)); … … 5800 5738 */ 5801 5739 if (iDrReg < 4) 5802 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3);5740 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3); 5803 5741 else if (iDrReg == 6) 5804 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR6);5742 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6); 5805 5743 5806 5744 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX); … … 5824 5762 return iemRaiseGeneralProtectionFault0(pVCpu); 5825 5763 Assert(!IEM_GET_CTX(pVCpu)->eflags.Bits.u1VM); 5826 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);5764 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); 5827 5765 5828 5766 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG)) … … 5877 5815 return iemRaiseGeneralProtectionFault0(pVCpu); 5878 5816 } 5879 IEM_CTX_ASSERT( IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);5817 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER); 5880 5818 5881 5819 /* … … 5957 5895 IEM_CIMPL_DEF_0(iemCImpl_rdtsc) 5958 5896 { 5959 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);5960 5961 5897 /* 5962 5898 * Check preconditions. … … 5965 5901 return iemRaiseUndefinedOpcode(pVCpu); 5966 5902 5967 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR4);5968 if ( (p Ctx->cr4 & X86_CR4_TSD)5903 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 5904 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 5969 5905 && pVCpu->iem.s.uCpl != 0) 5970 5906 { … … 5987 5923 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks); 5988 5924 #endif 5989 p Ctx->rax = RT_LO_U32(uTicks);5990 p Ctx->rdx = RT_HI_U32(uTicks);5925 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks); 5926 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks); 5991 5927 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 5992 5928 return VINF_SUCCESS; … … 5999 5935 IEM_CIMPL_DEF_0(iemCImpl_rdtscp) 6000 5936 { 6001 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6002 6003 5937 /* 6004 5938 * Check preconditions. … … 6007 5941 return iemRaiseUndefinedOpcode(pVCpu); 6008 5942 6009 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR4);6010 if ( (p Ctx->cr4 & X86_CR4_TSD)5943 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 5944 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 6011 5945 && pVCpu->iem.s.uCpl != 0) 6012 5946 { … … 6026 5960 * Query the MSR first in case of trips to ring-3. 
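
The RDTSC and RDTSCP commits above split the 64-bit tick count across EDX:EAX; storing the 32-bit halves through rax/rdx also clears the upper GPR halves, matching 64-bit mode semantics. Sketch with a made-up tick value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t uTicks = 0x123456789abcdef0ULL;   /* pretend TSC reading */
        uint64_t rax = (uint32_t)uTicks;           /* RT_LO_U32           */
        uint64_t rdx = (uint32_t)(uTicks >> 32);   /* RT_HI_U32           */
        printf("edx:eax = %#llx:%#llx\n",
               (unsigned long long)rdx, (unsigned long long)rax);
        return 0;
    }
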
6027 5961 */ 6028 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_TSC_AUX);6029 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &p Ctx->rcx);5962 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX); 5963 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx); 6030 5964 if (rcStrict == VINF_SUCCESS) 6031 5965 { 6032 5966 /* Low dword of the TSC_AUX msr only. */ 6033 p Ctx->rcx &= UINT32_C(0xffffffff);5967 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff); 6034 5968 6035 5969 uint64_t uTicks = TMCpuTickGet(pVCpu); … … 6037 5971 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks); 6038 5972 #endif 6039 p Ctx->rax = RT_LO_U32(uTicks);6040 p Ctx->rdx = RT_HI_U32(uTicks);5973 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks); 5974 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks); 6041 5975 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6042 5976 } … … 6050 5984 IEM_CIMPL_DEF_0(iemCImpl_rdpmc) 6051 5985 { 6052 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6053 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4); 5986 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6054 5987 6055 5988 if ( pVCpu->iem.s.uCpl != 0 6056 && !(p Ctx->cr4 & X86_CR4_PCE))5989 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE)) 6057 5990 return iemRaiseGeneralProtectionFault0(pVCpu); 6058 5991 … … 6076 6009 IEM_CIMPL_DEF_0(iemCImpl_rdmsr) 6077 6010 { 6078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6079 6080 6011 /* 6081 6012 * Check preconditions. … … 6094 6025 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 6095 6026 { 6096 rcStrict = iemSvmHandleMsrIntercept(pVCpu, p Ctx, pCtx->ecx, false /* fWrite */);6027 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */); 6097 6028 if (rcStrict == VINF_SVM_VMEXIT) 6098 6029 return VINF_SUCCESS; 6099 6030 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 6100 6031 { 6101 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", p Ctx->ecx, VBOXSTRICTRC_VAL(rcStrict)));6032 Log(("IEM: SVM intercepted rdmsr(%#x) failed. 
rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict))); 6102 6033 return rcStrict; 6103 6034 } … … 6105 6036 #endif 6106 6037 6107 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_ALL_MSRS);6108 6109 rcStrict = CPUMQueryGuestMsr(pVCpu, p Ctx->ecx, &uValue.u);6038 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 6039 6040 rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u); 6110 6041 if (rcStrict == VINF_SUCCESS) 6111 6042 { 6112 p Ctx->rax = uValue.s.Lo;6113 p Ctx->rdx = uValue.s.Hi;6043 pVCpu->cpum.GstCtx.rax = uValue.s.Lo; 6044 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi; 6114 6045 6115 6046 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 6121 6052 if (rcStrict == VINF_CPUM_R3_MSR_READ) 6122 6053 { 6123 Log(("IEM: rdmsr(%#x) -> ring-3\n", p Ctx->ecx));6054 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx)); 6124 6055 return rcStrict; 6125 6056 } … … 6128 6059 static uint32_t s_cTimes = 0; 6129 6060 if (s_cTimes++ < 10) 6130 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", p Ctx->ecx));6061 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx)); 6131 6062 else 6132 6063 #endif 6133 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", p Ctx->ecx));6064 Log(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx)); 6134 6065 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS); 6135 6066 return iemRaiseGeneralProtectionFault0(pVCpu); … … 6142 6073 IEM_CIMPL_DEF_0(iemCImpl_wrmsr) 6143 6074 { 6144 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6145 6146 6075 /* 6147 6076 * Check preconditions. … … 6156 6085 */ 6157 6086 RTUINT64U uValue; 6158 uValue.s.Lo = p Ctx->eax;6159 uValue.s.Hi = p Ctx->edx;6087 uValue.s.Lo = pVCpu->cpum.GstCtx.eax; 6088 uValue.s.Hi = pVCpu->cpum.GstCtx.edx; 6160 6089 6161 6090 VBOXSTRICTRC rcStrict; … … 6163 6092 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT)) 6164 6093 { 6165 rcStrict = iemSvmHandleMsrIntercept(pVCpu, p Ctx, pCtx->ecx, true /* fWrite */);6094 rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, true /* fWrite */); 6166 6095 if (rcStrict == VINF_SVM_VMEXIT) 6167 6096 return VINF_SUCCESS; 6168 6097 if (rcStrict != VINF_HM_INTERCEPT_NOT_ACTIVE) 6169 6098 { 6170 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", p Ctx->ecx, VBOXSTRICTRC_VAL(rcStrict)));6099 Log(("IEM: SVM intercepted rdmsr(%#x) failed. 
rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict))); 6171 6100 return rcStrict; 6172 6101 } … … 6174 6103 #endif 6175 6104 6176 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_ALL_MSRS);6177 6178 rcStrict = CPUMSetGuestMsr(pVCpu, p Ctx->ecx, uValue.u);6105 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 6106 6107 rcStrict = CPUMSetGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, uValue.u); 6179 6108 if (rcStrict == VINF_SUCCESS) 6180 6109 { … … 6187 6116 if (rcStrict == VINF_CPUM_R3_MSR_WRITE) 6188 6117 { 6189 Log(("IEM: wrmsr(%#x) -> ring-3\n", p Ctx->ecx));6118 Log(("IEM: wrmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx)); 6190 6119 return rcStrict; 6191 6120 } … … 6194 6123 static uint32_t s_cTimes = 0; 6195 6124 if (s_cTimes++ < 10) 6196 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", p Ctx->ecx, uValue.s.Hi, uValue.s.Lo));6125 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx, uValue.s.Hi, uValue.s.Lo)); 6197 6126 else 6198 6127 #endif 6199 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", p Ctx->ecx, uValue.s.Hi, uValue.s.Lo));6128 Log(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx, uValue.s.Hi, uValue.s.Lo)); 6200 6129 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS); 6201 6130 return iemRaiseGeneralProtectionFault0(pVCpu); … … 6211 6140 IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg) 6212 6141 { 6213 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6214 6215 6142 /* 6216 6143 * CPL check 6217 6144 */ 6218 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx,u16Port, cbReg);6145 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg); 6219 6146 if (rcStrict != VINF_SUCCESS) 6220 6147 return rcStrict; … … 6256 6183 switch (cbReg) 6257 6184 { 6258 case 1: p Ctx->al = (uint8_t)u32Value; break;6259 case 2: p Ctx->ax = (uint16_t)u32Value; break;6260 case 4: p Ctx->rax = u32Value; break;6185 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break; 6186 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break; 6187 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break; 6261 6188 default: AssertFailedReturn(VERR_IEM_IPE_3); 6262 6189 } … … 6270 6197 * Check for I/O breakpoints. 
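
Worth noting in the IN commit above: 1- and 2-byte reads merge into AL/AX and leave the rest of RAX untouched, while the 4-byte case assigns RAX outright, mirroring the hardware's zero-extension of 32-bit GPR writes. Standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rax      = 0xffffffffffffffffULL;
        uint32_t u32Value = 0x1234abcd;  /* value read from the port */
        int      cbReg    = 4;           /* operand size             */
        switch (cbReg)
        {
            case 1: rax = (rax & ~0xffULL)   | (uint8_t)u32Value;  break;
            case 2: rax = (rax & ~0xffffULL) | (uint16_t)u32Value; break;
            case 4: rax = u32Value; break;   /* clears bits 63:32   */
        }
        printf("rax=%#llx\n", (unsigned long long)rax);
        return 0;
    }
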
6271 6198 */ 6272 uint32_t const uDr7 = p Ctx->dr[7];6199 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7]; 6273 6200 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) 6274 6201 && X86_DR7_ANY_RW_IO(uDr7) 6275 && (p Ctx->cr4 & X86_CR4_DE))6202 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)) 6276 6203 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM)))) 6277 6204 { 6278 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);6279 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);6205 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6); 6206 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg); 6280 6207 if (rcStrict == VINF_EM_RAW_GUEST_TRAP) 6281 6208 rcStrict = iemRaiseDebugException(pVCpu); … … 6306 6233 IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg) 6307 6234 { 6308 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6309 6310 6235 /* 6311 6236 * CPL check 6312 6237 */ 6313 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx,u16Port, cbReg);6238 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg); 6314 6239 if (rcStrict != VINF_SUCCESS) 6315 6240 return rcStrict; … … 6348 6273 switch (cbReg) 6349 6274 { 6350 case 1: u32Value = p Ctx->al; break;6351 case 2: u32Value = p Ctx->ax; break;6352 case 4: u32Value = p Ctx->eax; break;6275 case 1: u32Value = pVCpu->cpum.GstCtx.al; break; 6276 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break; 6277 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break; 6353 6278 default: AssertFailedReturn(VERR_IEM_IPE_4); 6354 6279 } … … 6365 6290 * Check for I/O breakpoints. 6366 6291 */ 6367 uint32_t const uDr7 = p Ctx->dr[7];6292 uint32_t const uDr7 = pVCpu->cpum.GstCtx.dr[7]; 6368 6293 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) 6369 6294 && X86_DR7_ANY_RW_IO(uDr7) 6370 && (p Ctx->cr4 & X86_CR4_DE))6295 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)) 6371 6296 || DBGFBpIsHwIoArmed(pVCpu->CTX_SUFF(pVM)))) 6372 6297 { 6373 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);6374 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx, u16Port, cbReg);6298 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6); 6299 rcStrict = DBGFBpCheckIo(pVCpu->CTX_SUFF(pVM), pVCpu, IEM_GET_CTX(pVCpu), u16Port, cbReg); 6375 6300 if (rcStrict == VINF_EM_RAW_GUEST_TRAP) 6376 6301 rcStrict = iemRaiseDebugException(pVCpu); … … 6397 6322 IEM_CIMPL_DEF_0(iemCImpl_cli) 6398 6323 { 6399 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6400 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 6324 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu); 6401 6325 uint32_t const fEflOld = fEfl; 6402 6326 6403 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);6404 if (p Ctx->cr0 & X86_CR0_PE)6327 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4); 6328 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) 6405 6329 { 6406 6330 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl); … … 6410 6334 fEfl &= ~X86_EFL_IF; 6411 6335 else if ( pVCpu->iem.s.uCpl == 3 6412 && (p Ctx->cr4 & X86_CR4_PVI) )6336 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) ) 6413 6337 fEfl &= ~X86_EFL_VIF; 6414 6338 else … … 6419 6343 fEfl &= ~X86_EFL_IF; 6420 6344 else if ( uIopl < 3 6421 && (p Ctx->cr4 & X86_CR4_VME) )6345 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) ) 6422 6346 fEfl &= ~X86_EFL_VIF; 6423 6347 else … … 6429 6353 6430 6354 /* Commit. 
*/ 6431 IEMMISC_SET_EFL(pVCpu, pCtx,fEfl);6355 IEMMISC_SET_EFL(pVCpu, fEfl); 6432 6356 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6433 6357 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld); … … 6441 6365 IEM_CIMPL_DEF_0(iemCImpl_sti) 6442 6366 { 6443 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6444 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx); 6367 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu); 6445 6368 uint32_t const fEflOld = fEfl; 6446 6369 6447 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);6448 if (p Ctx->cr0 & X86_CR0_PE)6370 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4); 6371 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) 6449 6372 { 6450 6373 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl); … … 6454 6377 fEfl |= X86_EFL_IF; 6455 6378 else if ( pVCpu->iem.s.uCpl == 3 6456 && (p Ctx->cr4 & X86_CR4_PVI)6379 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) 6457 6380 && !(fEfl & X86_EFL_VIP) ) 6458 6381 fEfl |= X86_EFL_VIF; … … 6464 6387 fEfl |= X86_EFL_IF; 6465 6388 else if ( uIopl < 3 6466 && (p Ctx->cr4 & X86_CR4_VME)6389 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) 6467 6390 && !(fEfl & X86_EFL_VIP) ) 6468 6391 fEfl |= X86_EFL_VIF; … … 6475 6398 6476 6399 /* Commit. */ 6477 IEMMISC_SET_EFL(pVCpu, pCtx,fEfl);6400 IEMMISC_SET_EFL(pVCpu, fEfl); 6478 6401 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6479 6402 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) 6480 EMSetInhibitInterruptsPC(pVCpu, p Ctx->rip);6403 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 6481 6404 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl)); 6482 6405 return VINF_SUCCESS; … … 6526 6449 * Gather the operands and validate them. 6527 6450 */ 6528 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6529 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax; 6530 uint32_t uEcx = pCtx->ecx; 6531 uint32_t uEdx = pCtx->edx; 6451 RTGCPTR GCPtrMem = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax; 6452 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx; 6453 uint32_t uEdx = pVCpu->cpum.GstCtx.edx; 6532 6454 /** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or 6533 6455 * \#GP first. */ … … 6557 6479 * Call EM to prepare the monitor/wait. 6558 6480 */ 6559 rcStrict = EMMonitorWaitPrepare(pVCpu, p Ctx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);6481 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem); 6560 6482 Assert(rcStrict == VINF_SUCCESS); 6561 6483 … … 6589 6511 * Gather the operands and validate them. 6590 6512 */ 6591 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6592 uint32_t uEax = pCtx->eax; 6593 uint32_t uEcx = pCtx->ecx; 6513 uint32_t uEax = pVCpu->cpum.GstCtx.eax; 6514 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx; 6594 6515 if (uEcx != 0) 6595 6516 { … … 6657 6578 * Do the job. 
6658 6579 */ 6659 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6660 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_OTHER_MSRS); 6661 uint64_t uOtherGsBase = pCtx->msrKERNELGSBASE; 6662 pCtx->msrKERNELGSBASE = pCtx->gs.u64Base; 6663 pCtx->gs.u64Base = uOtherGsBase; 6580 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS); 6581 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE; 6582 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base; 6583 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase; 6664 6584 6665 6585 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 6673 6593 IEM_CIMPL_DEF_0(iemCImpl_cpuid) 6674 6594 { 6675 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6676 6677 6595 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID)) 6678 6596 { … … 6682 6600 } 6683 6601 6684 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_ALL_MSRS);6685 CPUMGetGuestCpuId(pVCpu, p Ctx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);6686 p Ctx->rax &= UINT32_C(0xffffffff);6687 p Ctx->rbx &= UINT32_C(0xffffffff);6688 p Ctx->rcx &= UINT32_C(0xffffffff);6689 p Ctx->rdx &= UINT32_C(0xffffffff);6602 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 6603 CPUMGetGuestCpuId(pVCpu, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx); 6604 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff); 6605 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff); 6606 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff); 6607 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff); 6690 6608 6691 6609 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 6701 6619 IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm) 6702 6620 { 6703 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6704 6705 uint16_t const ax = pCtx->ax; 6621 uint16_t const ax = pVCpu->cpum.GstCtx.ax; 6706 6622 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm; 6707 p Ctx->ax = al;6623 pVCpu->cpum.GstCtx.ax = al; 6708 6624 iemHlpUpdateArithEFlagsU8(pVCpu, al, 6709 6625 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, … … 6722 6638 IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm) 6723 6639 { 6724 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6725 6640 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. 
*/ 6726 6641 6727 uint16_t const ax = p Ctx->ax;6642 uint16_t const ax = pVCpu->cpum.GstCtx.ax; 6728 6643 uint8_t const al = (uint8_t)ax % bImm; 6729 6644 uint8_t const ah = (uint8_t)ax / bImm; 6730 p Ctx->ax = (ah << 8) + al;6645 pVCpu->cpum.GstCtx.ax = (ah << 8) + al; 6731 6646 iemHlpUpdateArithEFlagsU8(pVCpu, al, 6732 6647 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, … … 6743 6658 IEM_CIMPL_DEF_0(iemCImpl_daa) 6744 6659 { 6745 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6746 6747 uint8_t const al = pCtx->al; 6748 bool const fCarry = pCtx->eflags.Bits.u1CF; 6749 6750 if ( pCtx->eflags.Bits.u1AF 6660 uint8_t const al = pVCpu->cpum.GstCtx.al; 6661 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF; 6662 6663 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6751 6664 || (al & 0xf) >= 10) 6752 6665 { 6753 p Ctx->al = al + 6;6754 p Ctx->eflags.Bits.u1AF = 1;6666 pVCpu->cpum.GstCtx.al = al + 6; 6667 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6755 6668 } 6756 6669 else 6757 p Ctx->eflags.Bits.u1AF = 0;6670 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6758 6671 6759 6672 if (al >= 0x9a || fCarry) 6760 6673 { 6761 p Ctx->al += 0x60;6762 p Ctx->eflags.Bits.u1CF = 1;6674 pVCpu->cpum.GstCtx.al += 0x60; 6675 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6763 6676 } 6764 6677 else 6765 p Ctx->eflags.Bits.u1CF = 0;6766 6767 iemHlpUpdateArithEFlagsU8(pVCpu, p Ctx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6678 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6679 6680 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6768 6681 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6769 6682 return VINF_SUCCESS; … … 6776 6689 IEM_CIMPL_DEF_0(iemCImpl_das) 6777 6690 { 6778 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6779 6780 uint8_t const uInputAL = pCtx->al; 6781 bool const fCarry = pCtx->eflags.Bits.u1CF; 6782 6783 if ( pCtx->eflags.Bits.u1AF 6691 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al; 6692 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF; 6693 6694 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6784 6695 || (uInputAL & 0xf) >= 10) 6785 6696 { 6786 p Ctx->eflags.Bits.u1AF = 1;6697 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6787 6698 if (uInputAL < 6) 6788 p Ctx->eflags.Bits.u1CF = 1;6789 p Ctx->al = uInputAL - 6;6699 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6700 pVCpu->cpum.GstCtx.al = uInputAL - 6; 6790 6701 } 6791 6702 else 6792 6703 { 6793 p Ctx->eflags.Bits.u1AF = 0;6794 p Ctx->eflags.Bits.u1CF = 0;6704 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6705 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6795 6706 } 6796 6707 6797 6708 if (uInputAL >= 0x9a || fCarry) 6798 6709 { 6799 p Ctx->al -= 0x60;6800 p Ctx->eflags.Bits.u1CF = 1;6801 } 6802 6803 iemHlpUpdateArithEFlagsU8(pVCpu, p Ctx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6710 pVCpu->cpum.GstCtx.al -= 0x60; 6711 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6712 } 6713 6714 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6804 6715 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6805 6716 return VINF_SUCCESS; … … 6812 6723 IEM_CIMPL_DEF_0(iemCImpl_aaa) 6813 6724 { 6814 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6815 6816 6725 if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 6817 6726 { 6818 if ( p Ctx->eflags.Bits.u1AF6819 || (p Ctx->ax & 0xf) >= 10)6820 { 6821 iemAImpl_add_u16(&p Ctx->ax, 0x106, &pCtx->eflags.u32);6822 p Ctx->eflags.Bits.u1AF = 1;6823 p Ctx->eflags.Bits.u1CF = 1;6727 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6728 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10) 6729 { 6730 
iemAImpl_add_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32); 6731 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6732 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6824 6733 } 6825 6734 else 6826 6735 { 6827 iemHlpUpdateArithEFlagsU16(pVCpu, p Ctx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6828 p Ctx->eflags.Bits.u1AF = 0;6829 p Ctx->eflags.Bits.u1CF = 0;6830 } 6831 p Ctx->ax &= UINT16_C(0xff0f);6736 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6737 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6738 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6739 } 6740 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f); 6832 6741 } 6833 6742 else 6834 6743 { 6835 if ( p Ctx->eflags.Bits.u1AF6836 || (p Ctx->ax & 0xf) >= 10)6837 { 6838 p Ctx->ax += UINT16_C(0x106);6839 p Ctx->eflags.Bits.u1AF = 1;6840 p Ctx->eflags.Bits.u1CF = 1;6744 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6745 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10) 6746 { 6747 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106); 6748 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6749 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6841 6750 } 6842 6751 else 6843 6752 { 6844 p Ctx->eflags.Bits.u1AF = 0;6845 p Ctx->eflags.Bits.u1CF = 0;6846 } 6847 p Ctx->ax &= UINT16_C(0xff0f);6848 iemHlpUpdateArithEFlagsU8(pVCpu, p Ctx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6753 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6754 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6755 } 6756 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f); 6757 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6849 6758 } 6850 6759 … … 6859 6768 IEM_CIMPL_DEF_0(iemCImpl_aas) 6860 6769 { 6861 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);6862 6863 6770 if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 6864 6771 { 6865 if ( p Ctx->eflags.Bits.u1AF6866 || (p Ctx->ax & 0xf) >= 10)6867 { 6868 iemAImpl_sub_u16(&p Ctx->ax, 0x106, &pCtx->eflags.u32);6869 p Ctx->eflags.Bits.u1AF = 1;6870 p Ctx->eflags.Bits.u1CF = 1;6772 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6773 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10) 6774 { 6775 iemAImpl_sub_u16(&pVCpu->cpum.GstCtx.ax, 0x106, &pVCpu->cpum.GstCtx.eflags.u32); 6776 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6777 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6871 6778 } 6872 6779 else 6873 6780 { 6874 iemHlpUpdateArithEFlagsU16(pVCpu, p Ctx->ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6875 p Ctx->eflags.Bits.u1AF = 0;6876 p Ctx->eflags.Bits.u1CF = 0;6877 } 6878 p Ctx->ax &= UINT16_C(0xff0f);6781 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6782 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6783 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6784 } 6785 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f); 6879 6786 } 6880 6787 else 6881 6788 { 6882 if ( p Ctx->eflags.Bits.u1AF6883 || (p Ctx->ax & 0xf) >= 10)6884 { 6885 p Ctx->ax -= UINT16_C(0x106);6886 p Ctx->eflags.Bits.u1AF = 1;6887 p Ctx->eflags.Bits.u1CF = 1;6789 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF 6790 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10) 6791 { 6792 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106); 6793 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1; 6794 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1; 6888 6795 } 6889 6796 else 6890 6797 { 6891 p Ctx->eflags.Bits.u1AF = 0;6892 p Ctx->eflags.Bits.u1CF = 0;6893 } 6894 p Ctx->ax &= UINT16_C(0xff0f);6895 iemHlpUpdateArithEFlagsU8(pVCpu, p Ctx->al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);6798 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0; 6799 
pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0; 6800 } 6801 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f); 6802 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF); 6896 6803 } 6897 6804 … … 6990 6897 IEM_CIMPL_DEF_0(iemCImpl_xgetbv) 6991 6898 { 6992 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 6993 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR4); 6994 if (pCtx->cr4 & X86_CR4_OSXSAVE) 6995 { 6996 uint32_t uEcx = pCtx->ecx; 6899 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6900 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) 6901 { 6902 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx; 6997 6903 switch (uEcx) 6998 6904 { … … 7006 6912 7007 6913 } 7008 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_XCRx);7009 p Ctx->rax = RT_LO_U32(pCtx->aXcr[uEcx]);7010 p Ctx->rdx = RT_HI_U32(pCtx->aXcr[uEcx]);6914 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx); 6915 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]); 6916 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]); 7011 6917 7012 6918 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 7023 6929 IEM_CIMPL_DEF_0(iemCImpl_xsetbv) 7024 6930 { 7025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7026 if (pCtx->cr4 & X86_CR4_OSXSAVE) 6931 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) 7027 6932 { 7028 6933 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV)) … … 7035 6940 if (pVCpu->iem.s.uCpl == 0) 7036 6941 { 7037 IEM_CTX_IMPORT_RET(pVCpu, pCtx,CPUMCTX_EXTRN_XCRx);7038 7039 uint32_t uEcx = p Ctx->ecx;7040 uint64_t uNewValue = RT_MAKE_U64(p Ctx->eax, pCtx->edx);6942 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx); 6943 6944 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx; 6945 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx); 7041 6946 switch (uEcx) 7042 6947 { … … 7126 7031 if (rcStrict == VINF_SUCCESS) 7127 7032 { 7128 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7129 pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */ 7033 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */ 7130 7034 if (!(*pEFlags & X86_EFL_ZF)) 7131 7035 { 7132 p Ctx->rax = pu128RaxRdx->s.Lo;7133 p Ctx->rdx = pu128RaxRdx->s.Hi;7036 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo; 7037 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi; 7134 7038 } 7135 7039 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 7183 7087 IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts) 7184 7088 { 7185 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7186 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7187 7188 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS)) 7089 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7090 7091 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) 7189 7092 return iemRaiseDeviceNotAvailable(pVCpu); 7190 7093 … … 7194 7097 */ 7195 7098 7196 PX86XSAVEAREA pXState = p Ctx->CTX_SUFF(pXState);7099 PX86XSAVEAREA pXState = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); 7197 7100 pXState->x87.FCW = 0x37f; 7198 7101 pXState->x87.FSW = 0; … … 7221 7124 IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 7222 7125 { 7223 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7224 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7126 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7225 7127 7226 7128 /* 7227 7129 * Raise exceptions. 
7228 7130 */ 7229 if (p Ctx->cr0 & X86_CR0_EM)7131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) 7230 7132 return iemRaiseUndefinedOpcode(pVCpu); 7231 if (p Ctx->cr0 & (X86_CR0_TS | X86_CR0_EM))7133 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM)) 7232 7134 return iemRaiseDeviceNotAvailable(pVCpu); 7233 7135 if (GCPtrEff & 15) … … 7235 7137 /** @todo CPU/VM detection possible! \#AC might not be signal for 7236 7138 * all/any misalignment sizes, intel says its an implementation detail. */ 7237 if ( (p Ctx->cr0 & X86_CR0_AM)7238 && p Ctx->eflags.Bits.u1AC7139 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) 7140 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC 7239 7141 && pVCpu->iem.s.uCpl == 3) 7240 7142 return iemRaiseAlignmentCheckException(pVCpu); … … 7250 7152 return rcStrict; 7251 7153 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512; 7252 PCX86FXSTATE pSrc = &p Ctx->CTX_SUFF(pXState)->x87;7154 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7253 7155 7254 7156 /* … … 7295 7197 7296 7198 /* XMM registers. */ 7297 if ( !(p Ctx->msrEFER & MSR_K6_EFER_FFXSR)7199 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR) 7298 7200 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 7299 7201 || pVCpu->iem.s.uCpl != 0) … … 7326 7228 IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 7327 7229 { 7328 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7329 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7230 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7330 7231 7331 7232 /* 7332 7233 * Raise exceptions. 7333 7234 */ 7334 if (p Ctx->cr0 & X86_CR0_EM)7235 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) 7335 7236 return iemRaiseUndefinedOpcode(pVCpu); 7336 if (p Ctx->cr0 & (X86_CR0_TS | X86_CR0_EM))7237 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM)) 7337 7238 return iemRaiseDeviceNotAvailable(pVCpu); 7338 7239 if (GCPtrEff & 15) … … 7340 7241 /** @todo CPU/VM detection possible! \#AC might not be signal for 7341 7242 * all/any misalignment sizes, intel says its an implementation detail. */ 7342 if ( (p Ctx->cr0 & X86_CR0_AM)7343 && p Ctx->eflags.Bits.u1AC7243 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) 7244 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC 7344 7245 && pVCpu->iem.s.uCpl == 3) 7345 7246 return iemRaiseAlignmentCheckException(pVCpu); … … 7355 7256 return rcStrict; 7356 7257 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512; 7357 PX86FXSTATE pDst = &p Ctx->CTX_SUFF(pXState)->x87;7258 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7358 7259 7359 7260 /* … … 7410 7311 7411 7312 /* XMM registers. */ 7412 if ( !(p Ctx->msrEFER & MSR_K6_EFER_FFXSR)7313 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR) 7413 7314 || pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT 7414 7315 || pVCpu->iem.s.uCpl != 0) … … 7441 7342 IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 7442 7343 { 7443 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7444 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 7344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 7445 7345 7446 7346 /* 7447 7347 * Raise exceptions. 
7448 7348 */ 7449 if (!(p Ctx->cr4 & X86_CR4_OSXSAVE))7349 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) 7450 7350 return iemRaiseUndefinedOpcode(pVCpu); 7451 if (p Ctx->cr0 & X86_CR0_TS)7351 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) 7452 7352 return iemRaiseDeviceNotAvailable(pVCpu); 7453 7353 if (GCPtrEff & 63) … … 7455 7355 /** @todo CPU/VM detection possible! \#AC might not be signal for 7456 7356 * all/any misalignment sizes, intel says its an implementation detail. */ 7457 if ( (p Ctx->cr0 & X86_CR0_AM)7458 && p Ctx->eflags.Bits.u1AC7357 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) 7358 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC 7459 7359 && pVCpu->iem.s.uCpl == 3) 7460 7360 return iemRaiseAlignmentCheckException(pVCpu); … … 7465 7365 * Calc the requested mask 7466 7366 */ 7467 uint64_t const fReqComponents = RT_MAKE_U64(p Ctx->eax, pCtx->edx) & pCtx->aXcr[0];7367 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0]; 7468 7368 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED); 7469 uint64_t const fXInUse = p Ctx->aXcr[0];7369 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0]; 7470 7370 7471 7371 /** @todo figure out the exact protocol for the memory access. Currently we … … 7483 7383 return rcStrict; 7484 7384 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512; 7485 PCX86FXSTATE pSrc = &p Ctx->CTX_SUFF(pXState)->x87;7385 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7486 7386 7487 7387 /* The header. */ … … 7557 7457 { 7558 7458 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */ 7559 AssertLogRelReturn(p Ctx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);7560 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR( pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);7459 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9); 7460 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI); 7561 7461 PX86XSAVEYMMHI pCompDst; 7562 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + p Ctx->aoffXState[XSAVE_C_YMM_BIT],7462 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 7563 7463 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 7564 7464 if (rcStrict != VINF_SUCCESS) … … 7598 7498 IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize) 7599 7499 { 7600 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7601 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 7500 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx); 7602 7501 7603 7502 /* 7604 7503 * Raise exceptions. 7605 7504 */ 7606 if (!(p Ctx->cr4 & X86_CR4_OSXSAVE))7505 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) 7607 7506 return iemRaiseUndefinedOpcode(pVCpu); 7608 if (p Ctx->cr0 & X86_CR0_TS)7507 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) 7609 7508 return iemRaiseDeviceNotAvailable(pVCpu); 7610 7509 if (GCPtrEff & 63) … … 7612 7511 /** @todo CPU/VM detection possible! \#AC might not be signal for 7613 7512 * all/any misalignment sizes, intel says its an implementation detail. 
*/ 7614 if ( (p Ctx->cr0 & X86_CR0_AM)7615 && p Ctx->eflags.Bits.u1AC7513 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM) 7514 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC 7616 7515 && pVCpu->iem.s.uCpl == 3) 7617 7516 return iemRaiseAlignmentCheckException(pVCpu); … … 7633 7532 return rcStrict; 7634 7533 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512; 7635 PX86FXSTATE pDst = &p Ctx->CTX_SUFF(pXState)->x87;7534 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7636 7535 7637 7536 /* 7638 7537 * Calc the requested mask 7639 7538 */ 7640 PX86XSAVEHDR pHdrDst = &p Ctx->CTX_SUFF(pXState)->Hdr;7539 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr; 7641 7540 PCX86XSAVEHDR pHdrSrc; 7642 7541 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R); … … 7644 7543 return rcStrict; 7645 7544 7646 uint64_t const fReqComponents = RT_MAKE_U64(p Ctx->eax, pCtx->edx) & pCtx->aXcr[0];7545 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0]; 7647 7546 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED); 7648 //uint64_t const fXInUse = p Ctx->aXcr[0];7547 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0]; 7649 7548 uint64_t const fRstorMask = pHdrSrc->bmXState; 7650 7549 uint64_t const fCompMask = pHdrSrc->bmXComp; … … 7756 7655 if (fReqComponents & XSAVE_C_YMM) 7757 7656 { 7758 AssertLogRelReturn(p Ctx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);7759 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR( pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);7657 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9); 7658 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI); 7760 7659 7761 7660 if (fRstorMask & XSAVE_C_YMM) … … 7764 7663 PCX86XSAVEYMMHI pCompSrc; 7765 7664 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst), 7766 iEffSeg, GCPtrEff + p Ctx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);7665 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R); 7767 7666 if (rcStrict != VINF_SUCCESS) 7768 7667 return rcStrict; … … 7803 7702 IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff) 7804 7703 { 7805 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7806 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7704 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7807 7705 7808 7706 /* 7809 7707 * Raise exceptions. 7810 7708 */ 7811 if ( !(p Ctx->cr0 & X86_CR0_EM)7812 && (p Ctx->cr4 & X86_CR4_OSFXSR))7813 { 7814 if (!(p Ctx->cr0 & X86_CR0_TS))7709 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) 7710 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) 7711 { 7712 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) 7815 7713 { 7816 7714 /* 7817 7715 * Do the job. 
7818 7716 */ 7819 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, p Ctx->CTX_SUFF(pXState)->x87.MXCSR);7717 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR); 7820 7718 if (rcStrict == VINF_SUCCESS) 7821 7719 { … … 7838 7736 IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff) 7839 7737 { 7840 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7841 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx); 7738 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx); 7842 7739 7843 7740 /* … … 7845 7742 */ 7846 7743 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu) 7847 ? (p Ctx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)7848 : !(p Ctx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */7849 && (p Ctx->cr4 & X86_CR4_OSXSAVE))7850 { 7851 if (!(p Ctx->cr0 & X86_CR0_TS))7744 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM) 7745 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */ 7746 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) 7747 { 7748 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) 7852 7749 { 7853 7750 /* 7854 7751 * Do the job. 7855 7752 */ 7856 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, p Ctx->CTX_SUFF(pXState)->x87.MXCSR);7753 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR); 7857 7754 if (rcStrict == VINF_SUCCESS) 7858 7755 { … … 7875 7772 IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff) 7876 7773 { 7877 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 7878 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7774 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX); 7879 7775 7880 7776 /* … … 7883 7779 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS 7884 7780 * happen after or before \#UD and \#EM? */ 7885 if ( !(p Ctx->cr0 & X86_CR0_EM)7886 && (p Ctx->cr4 & X86_CR4_OSFXSR))7887 { 7888 if (!(p Ctx->cr0 & X86_CR0_TS))7781 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) 7782 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) 7783 { 7784 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)) 7889 7785 { 7890 7786 /* … … 7898 7794 if (!(fNewMxCsr & ~fMxCsrMask)) 7899 7795 { 7900 p Ctx->CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr;7796 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR = fNewMxCsr; 7901 7797 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 7902 7798 return VINF_SUCCESS; … … 7917 7813 * Commmon routine for fnstenv and fnsave. 7918 7814 * 7919 * @param uPtr Where to store the state. 7920 * @param pCtx The CPU context. 7921 */ 7922 static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx) 7923 { 7924 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7925 PCX86FXSTATE pSrcX87 = &pCtx->CTX_SUFF(pXState)->x87; 7815 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7816 * @param enmEffOpSize The effective operand size. 7817 * @param uPtr Where to store the state. 
7818 */ 7819 static void iemCImplCommonFpuStoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr) 7820 { 7821 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7822 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7926 7823 if (enmEffOpSize == IEMMODE_16BIT) 7927 7824 { … … 7981 7878 * Commmon routine for fldenv and frstor 7982 7879 * 7880 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7881 * @param enmEffOpSize The effective operand size. 7983 7882 * @param uPtr Where to store the state. 7984 * @param pCtx The CPU context. 7985 */ 7986 static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx) 7987 { 7988 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7989 PX86FXSTATE pDstX87 = &pCtx->CTX_SUFF(pXState)->x87; 7883 */ 7884 static void iemCImplCommonFpuRestoreEnv(PVMCPU pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr) 7885 { 7886 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7887 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7990 7888 if (enmEffOpSize == IEMMODE_16BIT) 7991 7889 { … … 8059 7957 IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) 8060 7958 { 8061 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);8062 7959 RTPTRUNION uPtr; 8063 7960 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, … … 8066 7963 return rcStrict; 8067 7964 8068 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr , pCtx);7965 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr); 8069 7966 8070 7967 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); … … 8086 7983 IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) 8087 7984 { 8088 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 8089 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 7985 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8090 7986 8091 7987 RTPTRUNION uPtr; … … 8095 7991 return rcStrict; 8096 7992 8097 PX86FXSTATE pFpuCtx = &p Ctx->CTX_SUFF(pXState)->x87;8098 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr , pCtx);7993 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 7994 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr); 8099 7995 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28)); 8100 7996 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) … … 8139 8035 IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 8140 8036 { 8141 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);8142 8037 RTCPTRUNION uPtr; 8143 8038 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, … … 8146 8041 return rcStrict; 8147 8042 8148 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr , pCtx);8043 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr); 8149 8044 8150 8045 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R); … … 8166 8061 IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 8167 8062 { 8168 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);8169 8063 RTCPTRUNION uPtr; 8170 8064 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 
94 : 108, … … 8173 8067 return rcStrict; 8174 8068 8175 PX86FXSTATE pFpuCtx = &p Ctx->CTX_SUFF(pXState)->x87;8176 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr , pCtx);8069 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 8070 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr); 8177 8071 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28)); 8178 8072 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++) … … 8201 8095 IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw) 8202 8096 { 8203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 8204 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8097 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8205 8098 8206 8099 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */ … … 8209 8102 /** @todo Testcase: Test that it raises and loweres the FPU exception bits 8210 8103 * according to FSW. (This is was is currently implemented.) */ 8211 PX86FXSTATE pFpuCtx = &p Ctx->CTX_SUFF(pXState)->x87;8104 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 8212 8105 pFpuCtx->FCW = u16Fcw & ~X86_FCW_ZERO_MASK; 8213 8106 iemFpuRecalcExceptionStatus(pFpuCtx); … … 8228 8121 IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg) 8229 8122 { 8230 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 8231 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8232 8233 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87; 8123 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8124 8125 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 8234 8126 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW); 8235 8127 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK; … … 8264 8156 } 8265 8157 8266 iemFpuUpdateOpcodeAndIpWorker(pVCpu, p Ctx, pFpuCtx);8158 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 8267 8159 iemHlpUsedFpu(pVCpu); 8268 8160 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 8278 8170 IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop) 8279 8171 { 8280 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);8281 8172 Assert(iStReg < 8); 8282 IEM_CTX_ASSERT(p Ctx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);8173 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 8283 8174 8284 8175 /* 8285 8176 * Raise exceptions. 
8286 8177 */ 8287 if (p Ctx->cr0 & (X86_CR0_EM | X86_CR0_TS))8178 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) 8288 8179 return iemRaiseDeviceNotAvailable(pVCpu); 8289 8180 8290 PX86FXSTATE pFpuCtx = &p Ctx->CTX_SUFF(pXState)->x87;8181 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87; 8291 8182 uint16_t u16Fsw = pFpuCtx->FSW; 8292 8183 if (u16Fsw & X86_FSW_ES) … … 8308 8199 || (pFpuCtx->FCW & X86_FCW_IM) ) 8309 8200 { 8310 p Ctx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);8311 p Ctx->eflags.u |= pCtx->eflags.u & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);8201 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); 8202 pVCpu->cpum.GstCtx.eflags.u |= pVCpu->cpum.GstCtx.eflags.u & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); 8312 8203 } 8313 8204 } … … 8317 8208 pFpuCtx->FSW &= ~X86_FSW_C1; 8318 8209 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF; 8319 p Ctx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);8320 p Ctx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;8210 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF); 8211 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF; 8321 8212 } 8322 8213 else … … 8338 8229 } 8339 8230 8340 iemFpuUpdateOpcodeAndIpWorker(pVCpu, p Ctx, pFpuCtx);8231 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx); 8341 8232 iemHlpUsedFpu(pVCpu); 8342 8233 iemRegAddToRipAndClearRF(pVCpu, cbInstr); -
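Every IEMAll.cpp hunk above follows the same mechanical pattern: the cached PCPUMCTX local (and the pCtx parameter threaded through helpers such as iemCImplCommonFpuStoreEnv and iemFpuUpdateOpcodeAndIpWorker) is dropped, and the guest register context is reached through the per-CPU structure instead. A minimal before/after sketch of that shape, using deliberately stripped-down stand-ins for the real VMCPU/CPUMCTX layouts and an assumed IEM_GET_CTX expansion (the real definitions live in the VBox/vmm headers and carry far more state):

#include <stdint.h>

/* Hypothetical, heavily reduced layouts for illustration only. */
typedef struct CPUMCTX { uint64_t rax, rsp, rip; } CPUMCTX;
typedef CPUMCTX *PCPUMCTX;
typedef struct VMCPU { struct { CPUMCTX GstCtx; } cpum; } VMCPU;
typedef VMCPU *PVMCPU;
#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx) /* assumed expansion */

/* Old shape: fetch a context pointer once and pass it around. */
static void iemExampleOld(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    pCtx->rax = 0;
}

/* New shape after r72496: dereference the embedded context directly, so
 * one local and one helper parameter disappear per call chain. */
static void iemExampleNew(PVMCPU pVCpu)
{
    pVCpu->cpum.GstCtx.rax = 0;
}

The gain is less about performance than uniformity: with no second handle to the same state, a helper can never be handed a context that disagrees with the VMCPU it operates on.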
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r72493 r72496

     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);

     PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);

     PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
     uint64_t uBaseAddr;
     VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
     uint64_t uBaseAddr;
     VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);

     PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);

     uint64_t uBaseAddr;
…
     }

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
     PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
     uint64_t uBaseAddr;
…
     if (!fIoChecked)
     {
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, pCtx->dx, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx->dx, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

-    IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
+    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);

     /*
…
     {
         /** @todo check if this is too early for ecx=0. */
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     if (!fIoChecked)
     {
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, pCtx->dx, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx->dx, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     {
         /** @todo check if this is too early for ecx=0. */
-        rcStrict = iemHlpCheckPortIOPermission(pVCpu, pCtx, u16Port, OP_SIZE / 8);
+        rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
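The same parameter thinning shows up in iemHlpCheckPortIOPermission, which loses its pCtx argument while the check itself stays put. For readers who have not met that helper: when CPL > IOPL (or in V86 mode), the INS/OUTS implementations above must consult the I/O permission bitmap hanging off the TSS, and every bit covering the accessed port bytes must be clear. A self-contained sketch of that bitmap test under assumed names (the shipped helper fetches the bitmap words through the guest TSS and raises #GP(0) itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true when all IOPM bits for ports [u16Port, u16Port + cbOperand)
 * are clear; a bitmap that ends before the last byte needed denies access. */
static bool iopmAccessAllowed(const uint8_t *pbIopm, size_t cbIopm,
                              uint16_t u16Port, uint8_t cbOperand)
{
    for (uint32_t uPort = u16Port; uPort < (uint32_t)u16Port + cbOperand; uPort++)
    {
        if (uPort / 8 >= cbIopm)                        /* beyond the TSS limit */
            return false;
        if (pbIopm[uPort / 8] & (uint8_t)(1 << (uPort % 8)))
            return false;                               /* port is protected */
    }
    return true;
}

-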
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
r72469 r72496 58 58 * @returns Strict VBox status code. 59 59 * @param pVCpu The cross context virtual CPU structure. 60 * @param pCtx The guest-CPU context. 61 */ 62 DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPU pVCpu, PCPUMCTX pCtx) 60 */ 61 DECLINLINE(VBOXSTRICTRC) iemSvmWorldSwitch(PVMCPU pVCpu) 63 62 { 64 63 /* … … 67 66 * see comment in iemMemPageTranslateAndCheckAccess(). 68 67 */ 69 int rc = PGMChangeMode(pVCpu, p Ctx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);68 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER); 70 69 # ifdef IN_RING3 71 70 Assert(rc != VINF_PGM_CHANGE_MODE); … … 82 81 if (rc == VINF_SUCCESS) 83 82 { 84 rc = PGMFlushTLB(pVCpu, p Ctx->cr3, true);83 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true); 85 84 AssertRCReturn(rc, rc); 86 85 } … … 101 100 * 102 101 * @param pVCpu The cross context virtual CPU structure. 103 * @param pCtx The guest-CPU context.104 102 * @param uExitCode The exit code. 105 103 * @param uExitInfo1 The exit info. 1 field. 106 104 * @param uExitInfo2 The exit info. 2 field. 107 105 */ 108 IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx,uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)106 IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) 109 107 { 110 108 VBOXSTRICTRC rcStrict; 111 if ( CPUMIsGuestInSvmNestedHwVirtMode( pCtx)109 if ( CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)) 112 110 || uExitCode == SVM_EXIT_INVALID) 113 111 { 114 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", p Ctx->cs.Sel,115 p Ctx->rip, uExitCode, uExitInfo1, uExitInfo2));112 LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel, 113 pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2)); 116 114 117 115 /* 118 116 * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch. 119 117 */ 120 p Ctx->hwvirt.fGif = false;121 122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p Ctx->es));123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p Ctx->cs));124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p Ctx->ss));125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p Ctx->ds));118 pVCpu->cpum.GstCtx.hwvirt.fGif = false; 119 120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es)); 121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs)); 122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss)); 123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds)); 126 124 127 125 /* … … 132 130 PSVMVMCB pVmcbMem; 133 131 PGMPAGEMAPLOCK PgLockMem; 134 PSVMVMCBCTRL pVmcbCtrl = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;135 rcStrict = iemMemPageMap(pVCpu, p Ctx->hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem, &PgLockMem);132 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 133 rcStrict = iemMemPageMap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, (void **)&pVmcbMem, &PgLockMem); 136 134 if (rcStrict == VINF_SUCCESS) 137 135 { … … 141 139 * writing the VMCB back to guest memory. 
142 140 */ 143 HMSvmNstGstVmExitNotify(pVCpu, pCtx);141 HMSvmNstGstVmExitNotify(pVCpu, IEM_GET_CTX(pVCpu)); 144 142 145 143 /* … … 147 145 */ 148 146 PSVMVMCBSTATESAVE pVmcbMemState = &pVmcbMem->guest; 149 HMSVM_SEG_REG_COPY_TO_VMCB( pCtx, pVmcbMemState, ES, es);150 HMSVM_SEG_REG_COPY_TO_VMCB( pCtx, pVmcbMemState, CS, cs);151 HMSVM_SEG_REG_COPY_TO_VMCB( pCtx, pVmcbMemState, SS, ss);152 HMSVM_SEG_REG_COPY_TO_VMCB( pCtx, pVmcbMemState, DS, ds);153 pVmcbMemState->GDTR.u32Limit = p Ctx->gdtr.cbGdt;154 pVmcbMemState->GDTR.u64Base = p Ctx->gdtr.pGdt;155 pVmcbMemState->IDTR.u32Limit = p Ctx->idtr.cbIdt;156 pVmcbMemState->IDTR.u64Base = p Ctx->idtr.pIdt;157 pVmcbMemState->u64EFER = p Ctx->msrEFER;158 pVmcbMemState->u64CR4 = p Ctx->cr4;159 pVmcbMemState->u64CR3 = p Ctx->cr3;160 pVmcbMemState->u64CR2 = p Ctx->cr2;161 pVmcbMemState->u64CR0 = p Ctx->cr0;147 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, ES, es); 148 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, CS, cs); 149 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, SS, ss); 150 HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), pVmcbMemState, DS, ds); 151 pVmcbMemState->GDTR.u32Limit = pVCpu->cpum.GstCtx.gdtr.cbGdt; 152 pVmcbMemState->GDTR.u64Base = pVCpu->cpum.GstCtx.gdtr.pGdt; 153 pVmcbMemState->IDTR.u32Limit = pVCpu->cpum.GstCtx.idtr.cbIdt; 154 pVmcbMemState->IDTR.u64Base = pVCpu->cpum.GstCtx.idtr.pIdt; 155 pVmcbMemState->u64EFER = pVCpu->cpum.GstCtx.msrEFER; 156 pVmcbMemState->u64CR4 = pVCpu->cpum.GstCtx.cr4; 157 pVmcbMemState->u64CR3 = pVCpu->cpum.GstCtx.cr3; 158 pVmcbMemState->u64CR2 = pVCpu->cpum.GstCtx.cr2; 159 pVmcbMemState->u64CR0 = pVCpu->cpum.GstCtx.cr0; 162 160 /** @todo Nested paging. */ 163 pVmcbMemState->u64RFlags = p Ctx->rflags.u64;164 pVmcbMemState->u64RIP = p Ctx->rip;165 pVmcbMemState->u64RSP = p Ctx->rsp;166 pVmcbMemState->u64RAX = p Ctx->rax;167 pVmcbMemState->u64DR7 = p Ctx->dr[7];168 pVmcbMemState->u64DR6 = p Ctx->dr[6];169 pVmcbMemState->u8CPL = p Ctx->ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */170 Assert(CPUMGetGuestCPL(pVCpu) == p Ctx->ss.Attr.n.u2Dpl);171 if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx))172 pVmcbMemState->u64PAT = p Ctx->msrPAT;161 pVmcbMemState->u64RFlags = pVCpu->cpum.GstCtx.rflags.u64; 162 pVmcbMemState->u64RIP = pVCpu->cpum.GstCtx.rip; 163 pVmcbMemState->u64RSP = pVCpu->cpum.GstCtx.rsp; 164 pVmcbMemState->u64RAX = pVCpu->cpum.GstCtx.rax; 165 pVmcbMemState->u64DR7 = pVCpu->cpum.GstCtx.dr[7]; 166 pVmcbMemState->u64DR6 = pVCpu->cpum.GstCtx.dr[6]; 167 pVmcbMemState->u8CPL = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */ 168 Assert(CPUMGetGuestCPL(pVCpu) == pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl); 169 if (CPUMIsGuestSvmNestedPagingEnabled(pVCpu, IEM_GET_CTX(pVCpu))) 170 pVmcbMemState->u64PAT = pVCpu->cpum.GstCtx.msrPAT; 173 171 174 172 /* … … 192 190 193 191 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadow. 
*/ 194 && EMGetInhibitInterruptsPC(pVCpu) == p Ctx->rip)192 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip) 195 193 { 196 194 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1; 197 195 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 198 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", p Ctx->rip));196 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip)); 199 197 } 200 198 else … … 255 253 pVmcbMemCtrl->EventInject.n.u1Valid = 0; 256 254 257 iemMemPageUnmap(pVCpu, p Ctx->hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem);255 iemMemPageUnmap(pVCpu, pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, IEM_ACCESS_DATA_RW, pVmcbMem, &PgLockMem); 258 256 } 259 257 … … 266 264 */ 267 265 memset(pVmcbCtrl, 0, sizeof(*pVmcbCtrl)); 268 Assert(!CPUMIsGuestInSvmNestedHwVirtMode( pCtx));266 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))); 269 267 270 268 /* 271 269 * Restore the subset of force-flags that were preserved. 272 270 */ 273 if (p Ctx->hwvirt.fLocalForcedActions)274 { 275 VMCPU_FF_SET(pVCpu, p Ctx->hwvirt.fLocalForcedActions);276 p Ctx->hwvirt.fLocalForcedActions = 0;271 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions) 272 { 273 VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions); 274 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0; 277 275 } 278 276 … … 285 283 * Reload the guest's "host state". 286 284 */ 287 CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);285 CPUMSvmVmExitRestoreHostState(pVCpu, IEM_GET_CTX(pVCpu)); 288 286 289 287 /* 290 288 * Update PGM, IEM and others of a world-switch. 291 289 */ 292 rcStrict = iemSvmWorldSwitch(pVCpu , pCtx);290 rcStrict = iemSvmWorldSwitch(pVCpu); 293 291 if (rcStrict == VINF_SUCCESS) 294 292 rcStrict = VINF_SVM_VMEXIT; … … 304 302 else 305 303 { 306 AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", p Ctx->hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict)));304 AssertMsgFailed(("iemSvmVmexit: Mapping VMCB at %#RGp failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb, VBOXSTRICTRC_VAL(rcStrict))); 307 305 rcStrict = VERR_SVM_VMEXIT_FAILED; 308 306 } … … 334 332 * 335 333 * @param pVCpu The cross context virtual CPU structure. 336 * @param pCtx Pointer to the guest-CPU context.337 334 * @param cbInstr The length of the VMRUN instruction. 338 335 * @param GCPhysVmcb Guest physical address of the VMCB to run. 339 336 */ 340 IEM_STATIC VBOXSTRICTRC iemSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx,uint8_t cbInstr, RTGCPHYS GCPhysVmcb)337 IEM_STATIC VBOXSTRICTRC iemSvmVmrun(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPhysVmcb) 341 338 { 342 339 LogFlow(("iemSvmVmrun\n")); … … 345 342 * Cache the physical address of the VMCB for #VMEXIT exceptions. 346 343 */ 347 p Ctx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;344 pVCpu->cpum.GstCtx.hwvirt.svm.GCPhysVmcb = GCPhysVmcb; 348 345 349 346 /* 350 347 * Save the host state. 351 348 */ 352 CPUMSvmVmRunSaveHostState( pCtx, cbInstr);349 CPUMSvmVmRunSaveHostState(IEM_GET_CTX(pVCpu), cbInstr); 353 350 354 351 /* … … 356 353 */ 357 354 PVM pVM = pVCpu->CTX_SUFF(pVM); 358 int rc = PGMPhysSimpleReadGCPhys(pVM, p Ctx->hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB));355 int rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb), GCPhysVmcb, sizeof(SVMVMCB)); 359 356 if (RT_SUCCESS(rc)) 360 357 { … … 371 368 * typically enter hardware-assisted SVM soon anyway, see @bugref{7243#c113}. 
372 369 */ 373 PSVMVMCBCTRL pVmcbCtrl = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;374 PSVMVMCBSTATESAVE pVmcbNstGst = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->guest;370 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 371 PSVMVMCBSTATESAVE pVmcbNstGst = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->guest; 375 372 376 373 RT_ZERO(pVmcbCtrl->u8Reserved0); … … 399 396 */ 400 397 /* VMRUN must always be intercepted. */ 401 if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))398 if (!CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_VMRUN)) 402 399 { 403 400 Log(("iemSvmVmrun: VMRUN instruction not intercepted -> #VMEXIT\n")); 404 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);401 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 405 402 } 406 403 … … 449 446 { 450 447 Log(("iemSvmVmrun: Guest ASID is invalid -> #VMEXIT\n")); 451 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);448 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 452 449 } 453 450 … … 476 473 { 477 474 Log(("iemSvmVmrun: Flush-by-ASID not supported -> #VMEXIT\n")); 478 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);475 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 479 476 } 480 477 … … 487 484 { 488 485 Log(("iemSvmVmrun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap)); 489 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);486 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 490 487 } 491 488 … … 497 494 { 498 495 Log(("iemSvmVmrun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap)); 499 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);496 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 500 497 } 501 498 … … 505 502 { 506 503 Log(("iemSvmVmrun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0)); 507 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);504 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 508 505 } 509 506 if (pVmcbNstGst->u64CR0 >> 32) 510 507 { 511 508 Log(("iemSvmVmrun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64CR0)); 512 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);509 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 513 510 } 514 511 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */ … … 520 517 Log(("iemSvmVmrun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64DR6, 521 518 pVmcbNstGst->u64DR6)); 522 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);519 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 523 520 } 524 521 … … 533 530 { 534 531 Log(("iemSvmVmrun: PAT invalid. 
u64PAT=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64PAT)); 535 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);532 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 536 533 } 537 534 … … 539 536 * Copy the IO permission bitmap into the cache. 540 537 */ 541 Assert(p Ctx->hwvirt.svm.CTX_SUFF(pvIoBitmap));542 rc = PGMPhysSimpleReadGCPhys(pVM, p Ctx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,538 Assert(pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap)); 539 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap, 543 540 SVM_IOPM_PAGES * X86_PAGE_4K_SIZE); 544 541 if (RT_FAILURE(rc)) 545 542 { 546 543 Log(("iemSvmVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc)); 547 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);544 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 548 545 } 549 546 … … 551 548 * Copy the MSR permission bitmap into the cache. 552 549 */ 553 Assert(p Ctx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));554 rc = PGMPhysSimpleReadGCPhys(pVM, p Ctx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,550 Assert(pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap)); 551 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap, 555 552 SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE); 556 553 if (RT_FAILURE(rc)) 557 554 { 558 555 Log(("iemSvmVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc)); 559 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);556 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 560 557 } 561 558 … … 567 564 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway. 568 565 */ 569 HMSVM_SEG_REG_COPY_FROM_VMCB( pCtx, pVmcbNstGst, ES, es);570 HMSVM_SEG_REG_COPY_FROM_VMCB( pCtx, pVmcbNstGst, CS, cs);571 HMSVM_SEG_REG_COPY_FROM_VMCB( pCtx, pVmcbNstGst, SS, ss);572 HMSVM_SEG_REG_COPY_FROM_VMCB( pCtx, pVmcbNstGst, DS, ds);566 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, ES, es); 567 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, CS, cs); 568 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, SS, ss); 569 HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), pVmcbNstGst, DS, ds); 573 570 574 571 /** @todo Segment attribute overrides by VMRUN. */ … … 580 577 * We shall thus adjust both CS.DPL and SS.DPL here. 
581 578 */ 582 p Ctx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL;583 if (CPUMIsGuestInV86ModeEx( pCtx))584 p Ctx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;585 if (CPUMIsGuestInRealModeEx( pCtx))586 p Ctx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;587 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &p Ctx->ss));579 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = pVmcbNstGst->u8CPL; 580 if (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(pVCpu))) 581 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 3; 582 if (CPUMIsGuestInRealModeEx(IEM_GET_CTX(pVCpu))) 583 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl = 0; 584 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss)); 588 585 589 586 /* … … 600 597 { 601 598 Log(("iemSvmVmrun: EFER invalid uOldEfer=%#RX64 -> #VMEXIT\n", pVmcbNstGst->u64EFER)); 602 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);599 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 603 600 } 604 601 … … 611 608 bool const fProtMode = RT_BOOL(pVmcbNstGst->u64CR0 & X86_CR0_PE); 612 609 bool const fLongModeWithPaging = fLongModeEnabled && fPaging; 613 bool const fLongModeConformCS = p Ctx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;610 bool const fLongModeConformCS = pVCpu->cpum.GstCtx.cs.Attr.n.u1Long && pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig; 614 611 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */ 615 612 if (fLongModeWithPaging) … … 626 623 { 627 624 Log(("iemSvmVmrun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer)); 628 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);625 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 629 626 } 630 627 … … 645 642 * nested-guest. 646 643 */ 647 p Ctx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;644 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS; 648 645 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 649 646 … … 653 650 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilter) 654 651 { 655 p Ctx->hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount;652 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = pVmcbCtrl->u16PauseFilterCount; 656 653 if (pVM->cpum.ro.GuestFeatures.fSvmPauseFilterThreshold) 657 p Ctx->hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterCount;654 pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold = pVmcbCtrl->u16PauseFilterCount; 658 655 } 659 656 … … 684 681 * Copy the remaining guest state from the VMCB to the guest-CPU context. 
685 682 */ 686 p Ctx->gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit;687 p Ctx->gdtr.pGdt = pVmcbNstGst->GDTR.u64Base;688 p Ctx->idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit;689 p Ctx->idtr.pIdt = pVmcbNstGst->IDTR.u64Base;683 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcbNstGst->GDTR.u32Limit; 684 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcbNstGst->GDTR.u64Base; 685 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcbNstGst->IDTR.u32Limit; 686 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcbNstGst->IDTR.u64Base; 690 687 CPUMSetGuestCR0(pVCpu, pVmcbNstGst->u64CR0); 691 688 CPUMSetGuestCR4(pVCpu, pVmcbNstGst->u64CR4); 692 p Ctx->cr3 = pVmcbNstGst->u64CR3;693 p Ctx->cr2 = pVmcbNstGst->u64CR2;694 p Ctx->dr[6] = pVmcbNstGst->u64DR6;695 p Ctx->dr[7] = pVmcbNstGst->u64DR7;696 p Ctx->rflags.u64 = pVmcbNstGst->u64RFlags;697 p Ctx->rax = pVmcbNstGst->u64RAX;698 p Ctx->rsp = pVmcbNstGst->u64RSP;699 p Ctx->rip = pVmcbNstGst->u64RIP;700 CPUMSetGuestMsrEferNoCheck(pVCpu, p Ctx->msrEFER, uValidEfer);689 pVCpu->cpum.GstCtx.cr3 = pVmcbNstGst->u64CR3; 690 pVCpu->cpum.GstCtx.cr2 = pVmcbNstGst->u64CR2; 691 pVCpu->cpum.GstCtx.dr[6] = pVmcbNstGst->u64DR6; 692 pVCpu->cpum.GstCtx.dr[7] = pVmcbNstGst->u64DR7; 693 pVCpu->cpum.GstCtx.rflags.u64 = pVmcbNstGst->u64RFlags; 694 pVCpu->cpum.GstCtx.rax = pVmcbNstGst->u64RAX; 695 pVCpu->cpum.GstCtx.rsp = pVmcbNstGst->u64RSP; 696 pVCpu->cpum.GstCtx.rip = pVmcbNstGst->u64RIP; 697 CPUMSetGuestMsrEferNoCheck(pVCpu, pVCpu->cpum.GstCtx.msrEFER, uValidEfer); 701 698 if (pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging) 702 p Ctx->msrPAT = pVmcbNstGst->u64PAT;699 pVCpu->cpum.GstCtx.msrPAT = pVmcbNstGst->u64PAT; 703 700 704 701 /* Mask DR6, DR7 bits mandatory set/clear bits. */ 705 p Ctx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);706 p Ctx->dr[6] |= X86_DR6_RA1_MASK;707 p Ctx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);708 p Ctx->dr[7] |= X86_DR7_RA1_MASK;702 pVCpu->cpum.GstCtx.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK); 703 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_RA1_MASK; 704 pVCpu->cpum.GstCtx.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK); 705 pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_RA1_MASK; 709 706 710 707 /* … … 719 716 * Update PGM, IEM and others of a world-switch. 720 717 */ 721 VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu , pCtx);718 VBOXSTRICTRC rcStrict = iemSvmWorldSwitch(pVCpu); 722 719 if (rcStrict == VINF_SUCCESS) 723 720 { /* likely */ } … … 736 733 * Clear global interrupt flags to allow interrupts in the guest. 737 734 */ 738 p Ctx->hwvirt.fGif = true;735 pVCpu->cpum.GstCtx.hwvirt.fGif = true; 739 736 740 737 /* … … 742 739 */ 743 740 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject; 744 p Ctx->hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;741 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid; 745 742 if (pEventInject->n.u1Valid) 746 743 { … … 753 750 { 754 751 Log(("iemSvmVmrun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type)); 755 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);752 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 756 753 } 757 754 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION) … … 761 758 { 762 759 Log(("iemSvmVmrun: Invalid vector for hardware exception. 
uVector=%#x -> #VMEXIT\n", uVector)); 763 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);760 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 764 761 } 765 762 if ( uVector == X86_XCPT_BR 766 && CPUMIsGuestInLongModeEx( pCtx))763 && CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu))) 767 764 { 768 765 Log(("iemSvmVmrun: Cannot inject #BR when not in long mode -> #VMEXIT\n")); 769 return iemSvmVmexit(pVCpu, pCtx,SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);766 return iemSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 770 767 } 771 768 /** @todo any others? */ … … 794 791 * below. */ 795 792 LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n", 796 p Ctx->cs.Sel, pCtx->rip, uVector, enmType, uErrorCode, pCtx->cr2, pCtx->cr3, pCtx->msrEFER));793 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER)); 797 794 798 795 /* … … 806 803 if ( enmType == TRPM_TRAP 807 804 && uVector == X86_XCPT_PF) 808 TRPMSetFaultAddress(pVCpu, p Ctx->cr2);805 TRPMSetFaultAddress(pVCpu, pVCpu->cpum.GstCtx.cr2); 809 806 } 810 807 else 811 808 LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n", 812 p Ctx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));809 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64)); 813 810 814 811 LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 843 840 * @returns VBox strict status code. 844 841 * @param pVCpu The cross context virtual CPU structure of the calling thread. 845 * @param pCtx Pointer to the guest-CPU context.846 842 * @param u8Vector The interrupt or exception vector. 847 843 * @param fFlags The exception flags (see IEM_XCPT_FLAGS_XXX). … … 849 845 * @param uCr2 The CR2 value in case of a \#PF exception. 
850 846 */ 851 IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, 852 uint64_t uCr2) 853 { 854 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); 847 IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) 848 { 849 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))); 855 850 856 851 /* … … 892 887 && !(uErr & X86_TRAP_PF_ID)) 893 888 { 894 PSVMVMCBCTRL pVmcbCtrl = &p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;889 PSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl; 895 890 # ifdef IEM_WITH_CODE_TLB 896 891 uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf; … … 910 905 IEM_SVM_UPDATE_NRIP(pVCpu); 911 906 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x " 912 "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", p Ctx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt,907 "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt, 913 908 u8Vector, uExitInfo1, uExitInfo2)); 914 909 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2); … … 965 960 966 961 SVMIOIOEXITINFO IoExitInfo; 967 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 968 void *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap); 962 void *pvIoBitmap = pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvIoBitmap); 969 963 bool const fIntercept = HMSvmIsIOInterceptActive(pvIoBitmap, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, 970 964 &IoExitInfo); … … 973 967 Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port)); 974 968 IEM_SVM_UPDATE_NRIP(pVCpu); 975 return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_IOIO, IoExitInfo.u, pCtx->rip + cbInstr);969 return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr); 976 970 } 977 971 … … 996 990 * 997 991 * @param pVCpu The cross context virtual CPU structure. 998 * @param pCtx The guest-CPU context.999 992 * @param idMsr The MSR being accessed in the nested-guest. 1000 993 * @param fWrite Whether this is an MSR write access, @c false implies an … … 1002 995 * @param cbInstr The length of the MSR read/write instruction in bytes. 1003 996 */ 1004 IEM_STATIC VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPU pVCpu, PCPUMCTX pCtx,uint32_t idMsr, bool fWrite)997 IEM_STATIC VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPU pVCpu, uint32_t idMsr, bool fWrite) 1005 998 { 1006 999 /* 1007 1000 * Check if any MSRs are being intercepted. 1008 1001 */ 1009 Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));1010 Assert(CPUMIsGuestInSvmNestedHwVirtMode( pCtx));1002 Assert(CPUMIsGuestSvmCtrlInterceptSet(pVCpu, IEM_GET_CTX(pVCpu), SVM_CTRL_INTERCEPT_MSR_PROT)); 1003 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))); 1011 1004 1012 1005 uint64_t const uExitInfo1 = fWrite ? SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ; … … 1028 1021 * Check if the bit is set, if so, trigger a #VMEXIT. 
 */
-        uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);
+        uint8_t *pbMsrpm = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pvMsrBitmap);
         pbMsrpm += offMsrpm;
         if (*pbMsrpm & RT_BIT(uMsrpmBit))
         {
             IEM_SVM_UPDATE_NRIP(pVCpu);
-            return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
+            return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
         }
     }
…
         */
        Log(("iemSvmHandleMsrIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool -> #VMEXIT\n", idMsr, fWrite));
-        return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
+        return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
     }
     return VINF_HM_INTERCEPT_NOT_ACTIVE;
…
 # else
     LogFlow(("iemCImpl_vmrun\n"));
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);

     /** @todo Check effective address size using address size prefix. */
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
     if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
         || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
…
     }

-    VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, pCtx, cbInstr, GCPhysVmcb);
+    VBOXSTRICTRC rcStrict = iemSvmVmrun(pVCpu, cbInstr, GCPhysVmcb);
     if (rcStrict == VERR_SVM_VMEXIT_FAILED)
     {
-        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+        Assert(!CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
         rcStrict = VINF_EM_TRIPLE_FAULT;
     }
…
 # else
     LogFlow(("iemCImpl_vmload\n"));
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);

     /** @todo Check effective address size using address size prefix. */
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
     if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
         || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
…
     {
         LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, FS, fs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, GS, gs);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, TR, tr);
-        HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, LDTR, ldtr);
-
-        pCtx->msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
-        pCtx->msrSTAR         = VmcbNstGst.u64STAR;
-        pCtx->msrLSTAR        = VmcbNstGst.u64LSTAR;
-        pCtx->msrCSTAR        = VmcbNstGst.u64CSTAR;
-        pCtx->msrSFMASK       = VmcbNstGst.u64SFMASK;
-
-        pCtx->SysEnter.cs     = VmcbNstGst.u64SysEnterCS;
-        pCtx->SysEnter.esp    = VmcbNstGst.u64SysEnterESP;
-        pCtx->SysEnter.eip    = VmcbNstGst.u64SysEnterEIP;
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
+        pVCpu->cpum.GstCtx.msrSTAR         = VmcbNstGst.u64STAR;
+        pVCpu->cpum.GstCtx.msrLSTAR        = VmcbNstGst.u64LSTAR;
+        pVCpu->cpum.GstCtx.msrCSTAR        = VmcbNstGst.u64CSTAR;
+        pVCpu->cpum.GstCtx.msrSFMASK       = VmcbNstGst.u64SFMASK;
+
+        pVCpu->cpum.GstCtx.SysEnter.cs     = VmcbNstGst.u64SysEnterCS;
+        pVCpu->cpum.GstCtx.SysEnter.esp    = VmcbNstGst.u64SysEnterESP;
+        pVCpu->cpum.GstCtx.SysEnter.eip    = VmcbNstGst.u64SysEnterEIP;

         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
…
 # else
     LogFlow(("iemCImpl_vmsave\n"));
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmsave);

     /** @todo Check effective address size using address size prefix. */
-    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
     if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
         || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
…
     {
         LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, FS, fs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, GS, gs);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, TR, tr);
-        HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, LDTR, ldtr);
-
-        VmcbNstGst.u64KernelGSBase = pCtx->msrKERNELGSBASE;
-        VmcbNstGst.u64STAR         = pCtx->msrSTAR;
-        VmcbNstGst.u64LSTAR        = pCtx->msrLSTAR;
-        VmcbNstGst.u64CSTAR        = pCtx->msrCSTAR;
-        VmcbNstGst.u64SFMASK       = pCtx->msrSFMASK;
-
-        VmcbNstGst.u64SysEnterCS   = pCtx->SysEnter.cs;
-        VmcbNstGst.u64SysEnterESP  = pCtx->SysEnter.esp;
-        VmcbNstGst.u64SysEnterEIP  = pCtx->SysEnter.eip;
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
+        VmcbNstGst.u64STAR         = pVCpu->cpum.GstCtx.msrSTAR;
+        VmcbNstGst.u64LSTAR        = pVCpu->cpum.GstCtx.msrLSTAR;
+        VmcbNstGst.u64CSTAR        = pVCpu->cpum.GstCtx.msrCSTAR;
+        VmcbNstGst.u64SFMASK       = pVCpu->cpum.GstCtx.msrSFMASK;
+
+        VmcbNstGst.u64SysEnterCS   = pVCpu->cpum.GstCtx.SysEnter.cs;
+        VmcbNstGst.u64SysEnterESP  = pVCpu->cpum.GstCtx.SysEnter.esp;
+        VmcbNstGst.u64SysEnterEIP  = pVCpu->cpum.GstCtx.SysEnter.eip;

         rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), &VmcbNstGst,
…
 # else
     LogFlow(("iemCImpl_clgi\n"));
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
…
     }

-    pCtx->hwvirt.fGif = false;
+    pVCpu->cpum.GstCtx.hwvirt.fGif = false;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
…
 # else
     LogFlow(("iemCImpl_stgi\n"));
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, stgi);
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
…
     }

-    pCtx->hwvirt.fGif = true;
+    pVCpu->cpum.GstCtx.hwvirt.fGif = true;
     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
…
 IEM_CIMPL_DEF_0(iemCImpl_invlpga)
 {
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     /** @todo Check effective address size using address size prefix. */
-    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
     /** @todo PGM needs virtual ASID support. */
 # if 0
-    uint32_t const uAsid = pCtx->ecx;
+    uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
 # endif
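Taken together, the IEMAll.cpp hunks follow a single pattern: the separate PCPUMCTX parameter, whose only legal value was IEM_GET_CTX(pVCpu) anyway, is dropped, and guest state is reached through the vCPU structure itself. The following is a minimal compilable sketch of that shape, using simplified stand-in types rather than the real VBox/vmm definitions:

    /* Stand-in types only; the real CPUMCTX and VMCPU layouts live in the
     * VBox/vmm headers and carry far more state than shown here. */
    typedef struct CPUMCTX { unsigned long long rax; } CPUMCTX;
    typedef struct VMCPU   { struct { CPUMCTX GstCtx; } cpum; } VMCPU;

    /* Old shape: the context travels as a second parameter, even though the
     * only legal value is the context embedded in pVCpu itself. */
    static unsigned long long iemGetRaxOld(VMCPU *pVCpu, CPUMCTX *pCtx)
    {
        (void)pVCpu;
        return pCtx->rax;
    }

    /* New shape: one parameter fewer, and no way to pass a stale or
     * mismatched context. */
    static unsigned long long iemGetRaxNew(VMCPU *pVCpu)
    {
        return pVCpu->cpum.GstCtx.rax;
    }

Dropping the parameter removes the possibility of a caller handing in a context that disagrees with pVCpu, which appears to be the motivation for this sweep.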
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r72209 r72496
     /* Calc effective address with modified ESP. */
     /** @todo testcase */
-    PCPUMCTX        pCtx = IEM_GET_CTX(pVCpu);
     RTGCPTR         GCPtrEff;
     VBOXSTRICTRC    rcStrict;
…
     /* Perform the operation - this should be CImpl. */
     RTUINT64U TmpRsp;
-    TmpRsp.u = pCtx->rsp;
+    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
     switch (pVCpu->iem.s.enmEffOpSize)
     {
…
     if (rcStrict == VINF_SUCCESS)
     {
-        pCtx->rsp = TmpRsp.u;
+        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
         iemRegUpdateRipAndClearRF(pVCpu);
     }
…
     {
         /* TSC based pause-filter thresholding. */
-        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
         if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
-            && pCtx->hwvirt.svm.cPauseFilterThreshold > 0)
+            /** @todo r=bird: You're mixing decoding and implementation here!! pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold can be
+             *        modified by the guest and you cannot have a compile time check on it like this.  Ditto for
+             *        pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter below.  You need to move this into IEMAllCImpl.cpp.h! */
+            && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
         {
             uint64_t const uTick = TMCpuTickGet(pVCpu);
-            if (uTick - pCtx->hwvirt.svm.uPrevPauseTick > pCtx->hwvirt.svm.cPauseFilterThreshold)
-                pCtx->hwvirt.svm.cPauseFilter = IEM_GET_SVM_PAUSE_FILTER_COUNT(pVCpu);
-            pCtx->hwvirt.svm.uPrevPauseTick = uTick;
+            if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
+                pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = IEM_GET_SVM_PAUSE_FILTER_COUNT(pVCpu);
+            pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
         }

         /* Simple pause-filter counter. */
-        if (pCtx->hwvirt.svm.cPauseFilter > 0)
+        if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
         {
-            --pCtx->hwvirt.svm.cPauseFilter;
+            --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
             fCheckIntercept = false;
         }
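For context on the pause-filter hunk above: the counter is re-armed when PAUSE instructions arrive far apart (measured in TSC ticks), and each PAUSE inside a spin burst decrements it; only once the counter is exhausted does the PAUSE become a candidate for interception. A rough standalone model of that logic, with plain integers and hypothetical names in place of the VBox state:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy model of SVM pause-filter handling; field names are illustrative. */
    typedef struct PAUSEFILTER
    {
        uint16_t cFilter;         /* remaining PAUSEs before an intercept */
        uint16_t cFilterReload;   /* value used to re-arm the counter */
        uint64_t cThresholdTicks; /* re-arm if the gap between PAUSEs exceeds this */
        uint64_t uPrevPauseTick;  /* TSC value at the previous PAUSE */
    } PAUSEFILTER;

    static bool pauseShouldCheckIntercept(PAUSEFILTER *pThis, uint64_t uTick)
    {
        /* TSC based thresholding: a long gap means the guest is not spinning,
         * so re-arm the counter instead of moving closer to an intercept. */
        if (   pThis->cThresholdTicks > 0
            && uTick - pThis->uPrevPauseTick > pThis->cThresholdTicks)
            pThis->cFilter = pThis->cFilterReload;
        pThis->uPrevPauseTick = uTick;

        /* Simple pause-filter counter: swallow PAUSEs until it hits zero. */
        if (pThis->cFilter > 0)
        {
            --pThis->cFilter;
            return false;
        }
        return true; /* counter exhausted: evaluate the PAUSE intercept */
    }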
-
trunk/src/VBox/VMM/include/IEMInternal.h
r72495 r72496
 /** @def IEM_CTX_ASSERT
  * Asserts that the @a a_fExtrnMbz is present in the CPU context.
- * @param   a_pCtx          The CPUMCTX structure.
+ * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
  * @param   a_fExtrnMbz     The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
  */
-#define IEM_CTX_ASSERT(a_pCtx, a_fExtrnMbz)     Assert(!((a_pCtx)->fExtrn & (a_fExtrnMbz)))
+#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz)    Assert(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)))

 /** @def IEM_CTX_IMPORT_RET
…
  * Returns on import failure.
  *
- * @param   a_pVCpu         The cross context virtual CPU structure.
- * @param   a_pCtx          The CPUMCTX structure.
+ * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
  */
-#define IEM_CTX_IMPORT_RET(a_pVCpu, a_pCtx, a_fExtrnImport) \
+#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
     do { \
-        if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
         { /* likely */ } \
         else \
…
  * Will call the keep to import the bits as needed.
  *
- * @param   a_pVCpu         The cross context virtual CPU structure.
- * @param   a_pCtx          The CPUMCTX structure.
+ * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
  */
-#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_pCtx, a_fExtrnImport) \
+#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
     do { \
-        if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
         { /* likely */ } \
         else \
…
  * Jumps on import failure.
  *
- * @param   a_pVCpu         The cross context virtual CPU structure.
- * @param   a_pCtx          The CPUMCTX structure.
+ * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
  */
-#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_pCtx, a_fExtrnImport) \
+#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
     do { \
-        if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
+        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
         { /* likely */ } \
         else \
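The IEMInternal.h hunks apply the same signature change to the context macros: the a_pCtx argument disappears and the fExtrn bookkeeping is read through a_pVCpu. As a toy illustration of the lazy-import idea behind these macros (all names below are illustrative stand-ins, not the real CPUMCTX_EXTRN_* flags or types):

    #include <assert.h>
    #include <stdint.h>

    /* fExtrn is a bitmask of register groups still held externally (e.g. by
     * the hardware VMCB); a bit must be cleared (imported) before the
     * corresponding state may be read from the software context. */
    #define TOY_EXTRN_RIP   UINT64_C(0x0001)
    #define TOY_EXTRN_RSP   UINT64_C(0x0002)

    typedef struct TOYCTX { uint64_t fExtrn; uint64_t rip, rsp; } TOYCTX;
    typedef struct TOYCPU { struct { TOYCTX GstCtx; } cpum; } TOYCPU;

    /* Same shape as the new IEM_CTX_ASSERT: everything in the mask must
     * already be imported, i.e. those fExtrn bits must be zero. */
    #define TOY_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
        assert(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)))

    uint64_t toyGetRip(TOYCPU *pVCpu)
    {
        TOY_CTX_ASSERT(pVCpu, TOY_EXTRN_RIP); /* caller imported RIP already */
        return pVCpu->cpum.GstCtx.rip;
    }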