Changeset 52913 in vbox for trunk/src/VBox
- Timestamp: Oct 1, 2014 12:29:54 PM
- File: 1 edited
Legend: unmodified lines are prefixed with a space, added lines with '+', removed lines with '-'.
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r52884)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r52913)
@@ -999,6 +999,607 @@
 IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
 {
-    /* Call various functions to do the work. Clear RF. */
+#ifndef IEM_IMPLEMENTS_CALLGATE
     IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
+#else
+    /* NB: Far jumps can only do intra-privilege transfers. Far calls support
+     * inter-privilege calls and are much more complex.
+     *
+     * NB: 64-bit call gate has the same type as a 32-bit call gate! If
+     * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
+     * must be 16-bit or 32-bit.
+     */
+    /** @todo: effective operand size is probably irrelevant here, only the
+     *  call gate bitness matters??
+     */
+    VBOXSTRICTRC    rcStrict;
+    RTPTRUNION      uPtrRet;
+    uint64_t        uNewRsp;
+    uint64_t        uNewRip;
+    uint64_t        u64Base;
+    uint32_t        cbLimit;
+    RTSEL           uNewCS;
+    IEMSELDESC      DescCS;
+    PCPUMCTX        pCtx;
+
+    AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
+    Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
+    Assert(   pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
+           || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
+
+    /* Determine the new instruction pointer from the gate descriptor. */
+    uNewRip = pDesc->Legacy.Gate.u16OffsetLow
+            | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
+            | ((uint64_t)pDesc->Long.Gate.u32OffsetTop    << 32);
+
+    /* Perform DPL checks on the gate descriptor. */
+    if (   pDesc->Legacy.Gate.u2Dpl < pIemCpu->uCpl
+        || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
+    {
+        Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
+             pIemCpu->uCpl, (uSel & X86_SEL_RPL)));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+
+    /** @todo does this catch NULL selectors, too? */
+    if (!pDesc->Legacy.Gen.u1Present)
+    {
+        Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
+    }
+
+    /*
+     * Fetch the target CS descriptor from the GDT or LDT.
+     */
+    uNewCS = pDesc->Legacy.Gate.u16Sel;
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS, X86_XCPT_GP);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /* Target CS must be a code selector. */
+    if (   !DescCS.Legacy.Gen.u1DescType
+        || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
+    {
+        Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
+             uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+    }
+
+    /* Privilege checks on target CS. */
+    if (enmBranch == IEMBRANCH_JUMP)
+    {
+        if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+        {
+            if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
+            {
+                Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
+                     uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+        }
+        else
+        {
+            if (DescCS.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
+            {
+                Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
+                     uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+        }
+    }
+    else
+    {
+        Assert(enmBranch == IEMBRANCH_CALL);
+        if (DescCS.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
+        {
+            Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
+                 uNewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
+        }
+    }
+
+    /* Additional long mode checks. */
+    if (IEM_IS_LONG_MODE(pIemCpu))
+    {
+        if (!DescCS.Legacy.Gen.u1Long)
+        {
+            Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        }
+
+        /* L vs D. */
+        if (   DescCS.Legacy.Gen.u1Long
+            && DescCS.Legacy.Gen.u1DefBig)
+        {
+            Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        }
+    }
+
+    if (!DescCS.Legacy.Gate.u1Present)
+    {
+        Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
+    }
+
+    pCtx = pIemCpu->CTX_SUFF(pCtx);
+
+    if (enmBranch == IEMBRANCH_JUMP)
+    {
+        /** @todo: This is very similar to regular far jumps; merge! */
+        /* Jumps are fairly simple... */
+
+        /* Chop the high bits off if 16-bit gate (Intel says so). */
+        if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
+            uNewRip = (uint16_t)uNewRip;
+
+        /* Limit check for non-long segments. */
+        cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
+        if (DescCS.Legacy.Gen.u1Long)
+            u64Base = 0;
+        else
+        {
+            if (uNewRip > cbLimit)
+            {
+                Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
+            }
+            u64Base = X86DESC_BASE(&DescCS.Legacy);
+        }
+
+        /* Canonical address check. */
+        if (!IEM_IS_CANONICAL(uNewRip))
+        {
+            Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
+            return iemRaiseNotCanonical(pIemCpu);
+        }
+
+        /*
+         * Ok, everything checked out fine. Now set the accessed bit before
+         * committing the result into CS, CSHID and RIP.
+         */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            /** @todo check what VT-x and AMD-V does. */
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        /* commit */
+        pCtx->rip         = uNewRip;
+        pCtx->cs.Sel      = uNewCS & X86_SEL_MASK_OFF_RPL;
+        pCtx->cs.Sel     |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
+        pCtx->cs.ValidSel = pCtx->cs.Sel;
+        pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
+        pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pCtx->cs.u32Limit = cbLimit;
+        pCtx->cs.u64Base  = u64Base;
+    }
+    else
+    {
+        Assert(enmBranch == IEMBRANCH_CALL);
+        /* Calls are much more complicated. */
+
+        if (DescCS.Legacy.Gen.u2Dpl < pIemCpu->uCpl)
+        {
+            uint16_t    offNewStack;    /* Offset of new stack in TSS. */
+            uint16_t    cbNewStack;     /* Number of bytes the stack information takes up in TSS. */
+            uint8_t     uNewCSDpl;
+            uint8_t     cbWords;
+            RTSEL       uNewSS;
+            RTSEL       uOldSS;
+            uint64_t    uOldRsp;
+            IEMSELDESC  DescSS;
+            RTPTRUNION  uPtrTSS;
+            RTGCPTR     GCPtrTSS;
+            RTPTRUNION  uPtrParmWds;
+            RTGCPTR     GCPtrParmWds;
+
+            /* More privilege. This is the fun part. */
+            Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF));   /* Filtered out above. */
+
+            /*
+             * Determine new SS:rSP from the TSS.
+             */
+            Assert(!pCtx->tr.Attr.n.u1DescType);
+
+            /* Figure out where the new stack pointer is stored in the TSS. */
+            uNewCSDpl = uNewCS & X86_SEL_RPL;
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
+                {
+                    offNewStack = RT_OFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
+                    cbNewStack  = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
+                }
+                else
+                {
+                    Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
+                    offNewStack = RT_OFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
+                    cbNewStack  = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
+                }
+            }
+            else
+            {
+                Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
+                offNewStack = RT_OFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
+                cbNewStack  = RT_SIZEOFMEMB(X86TSS64, rsp0);
+            }
+
+            /* Check against TSS limit. */
+            if ((uint16_t)(offNewStack + cbNewStack - 1) > pCtx->tr.u32Limit)
+            {
+                Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
+                return iemRaiseTaskSwitchFaultBySelector(pIemCpu, pCtx->tr.Sel);
+            }
+
+            GCPtrTSS = pCtx->tr.u64Base + offNewStack;
+            rcStrict = iemMemMap(pIemCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
+            if (rcStrict != VINF_SUCCESS)
+            {
+                Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                return rcStrict;
+            }
+
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
+                {
+                    uNewRsp = uPtrTSS.pu32[0];
+                    uNewSS  = uPtrTSS.pu16[2];
+                }
+                else
+                {
+                    Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
+                    uNewRsp = uPtrTSS.pu16[0];
+                    uNewSS  = uPtrTSS.pu16[1];
+                }
+            }
+            else
+            {
+                Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
+                /* SS will be a NULL selector, but that's valid. */
+                uNewRsp = uPtrTSS.pu64[0];
+                uNewSS  = uNewCSDpl;
+            }
+
+            /* Done with the TSS now. */
+            rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
+            if (rcStrict != VINF_SUCCESS)
+            {
+                Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                return rcStrict;
+            }
+
+            /* Only used outside of long mode. */
+            cbWords = pDesc->Legacy.Gate.u4ParmCount;
+
+            /* If EFER.LMA is 0, there's extra work to do. */
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
+                {
+                    Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
+                    return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
+                }
+
+                /* Grab the new SS descriptor. */
+                rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
+                if (rcStrict != VINF_SUCCESS)
+                    return rcStrict;
+
+                /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
+                if (   (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
+                    || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
+                {
+                    Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
+                         uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
+                    return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
+                }
+
+                /* Ensure new SS is a writable data segment. */
+                if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+                {
+                    Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
+                    return iemRaiseTaskSwitchFaultBySelector(pIemCpu, uNewSS);
+                }
+
+                if (!DescSS.Legacy.Gen.u1Present)
+                {
+                    Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
+                    return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
+                }
+                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
+                    cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
+                else
+                    cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
+            }
+            else
+            {
+                /* Just grab the new (NULL) SS descriptor. */
+                rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_SS);
+                if (rcStrict != VINF_SUCCESS)
+                    return rcStrict;
+
+                cbNewStack = sizeof(uint64_t) * 4;
+            }
+
+            /** @todo: According to Intel, new stack is checked for enough space first,
+             *         then switched. According to AMD, the stack is switched first and
+             *         then pushes might fault!
+             */
+
+            /** @todo: According to AMD, CS is loaded first, then SS.
+             *         According to Intel, it's the other way around!?
+             */
+
+            /** @todo: Intel and AMD disagree on when exactly the CPL changes! */
+
+            /* Set the accessed bit before committing new SS. */
+            if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+            {
+                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
+                if (rcStrict != VINF_SUCCESS)
+                    return rcStrict;
+                DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+            }
+
+            /* Remember the old SS:rSP and their linear address. */
+            uOldSS  = pCtx->ss.Sel;
+            uOldRsp = pCtx->rsp;
+
+            GCPtrParmWds = pCtx->ss.u64Base + pCtx->rsp;
+
+            /* Commit new SS:rSP. */
+            pCtx->ss.Sel      = uNewSS;
+            pCtx->ss.ValidSel = uNewSS;
+            pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
+            pCtx->ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
+            pCtx->ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
+            pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
+            pCtx->rsp         = uNewRsp;
+            pIemCpu->uCpl     = uNewCSDpl;
+            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), &pCtx->ss));
+            CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
+
+            /* Check new stack - may #SS(NewSS). */
+            rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbNewStack,
+                                                   &uPtrRet.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+            {
+                Log(("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                return rcStrict;
+            }
+
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
+                {
+                    /* Push the old CS:rIP. */
+                    uPtrRet.pu32[0] = pCtx->eip + cbInstr;
+                    uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
+
+                    /* Map the relevant chunk of the old stack. */
+                    rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
+                    if (rcStrict != VINF_SUCCESS)
+                    {
+                        Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                        return rcStrict;
+                    }
+
+                    /* Copy the parameter (d)words. */
+                    for (int i = 0; i < cbWords; ++i)
+                        uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
+
+                    /* Unmap the old stack. */
+                    rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
+                    if (rcStrict != VINF_SUCCESS)
+                    {
+                        Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                        return rcStrict;
+                    }
+
+                    /* Push the old SS:rSP. */
+                    uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
+                    uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
+                }
+                else
+                {
+                    Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
+
+                    /* Push the old CS:rIP. */
+                    uPtrRet.pu16[0] = pCtx->ip + cbInstr;
+                    uPtrRet.pu16[1] = pCtx->cs.Sel;
+
+                    /* Map the relevant chunk of the old stack. */
+                    rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
+                    if (rcStrict != VINF_SUCCESS)
+                    {
+                        Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                        return rcStrict;
+                    }
+
+                    /* Copy the parameter words. */
+                    for (int i = 0; i < cbWords; ++i)
+                        uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
+
+                    /* Unmap the old stack. */
+                    rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
+                    if (rcStrict != VINF_SUCCESS)
+                    {
+                        Log(("BranchCallGate: Old stack unmapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                        return rcStrict;
+                    }
+
+                    /* Push the old SS:rSP. */
+                    uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
+                    uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
+                }
+            }
+            else
+            {
+                Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
+
+                /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
+                uPtrRet.pu64[0] = pCtx->rip + cbInstr;
+                uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
+                uPtrRet.pu64[2] = uOldRsp;
+                uPtrRet.pu64[3] = uOldSS;       /** @todo Testcase: What is written to the high words when pushing SS? */
+            }
+
+            rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+            {
+                Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
+                return rcStrict;
+            }
+
+            /* Chop the high bits off if 16-bit gate (Intel says so). */
+            if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
+                uNewRip = (uint16_t)uNewRip;
+
+            /* Limit / canonical check. */
+            cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (uNewRip > cbLimit)
+                {
+                    Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
+                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
+                }
+                u64Base = X86DESC_BASE(&DescCS.Legacy);
+            }
+            else
+            {
+                Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
+                if (!IEM_IS_CANONICAL(uNewRip))
+                {
+                    Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
+                    return iemRaiseNotCanonical(pIemCpu);
+                }
+                u64Base = 0;
+            }
+
+            /*
+             * Now set the accessed bit before
+             * writing the return address to the stack and committing the result into
+             * CS, CSHID and RIP.
+             */
+            /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
+            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+            {
+                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+                if (rcStrict != VINF_SUCCESS)
+                    return rcStrict;
+                /** @todo check what VT-x and AMD-V does. */
+                DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+            }
+
+            /* Commit new CS:rIP. */
+            pCtx->rip         = uNewRip;
+            pCtx->cs.Sel      = uNewCS & X86_SEL_MASK_OFF_RPL;
+            pCtx->cs.Sel     |= pIemCpu->uCpl;
+            pCtx->cs.ValidSel = pCtx->cs.Sel;
+            pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
+            pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+            pCtx->cs.u32Limit = cbLimit;
+            pCtx->cs.u64Base  = u64Base;
+        }
+        else
+        {
+            /* Same privilege. */
+            /** @todo: This is very similar to regular far calls; merge! */
+
+            /* Check stack first - may #SS(0). */
+            /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
+             *        16-bit code cause a two or four byte CS to be pushed? */
+            rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
+                                                   IEM_IS_LONG_MODE(pIemCpu) ? 8+8
+                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
+                                                   &uPtrRet.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+
+            /* Chop the high bits off if 16-bit gate (Intel says so). */
+            if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
+                uNewRip = (uint16_t)uNewRip;
+
+            /* Limit / canonical check. */
+            cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (uNewRip > cbLimit)
+                {
+                    Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
+                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
+                }
+                u64Base = X86DESC_BASE(&DescCS.Legacy);
+            }
+            else
+            {
+                if (!IEM_IS_CANONICAL(uNewRip))
+                {
+                    Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
+                    return iemRaiseNotCanonical(pIemCpu);
+                }
+                u64Base = 0;
+            }
+
+            /*
+             * Now set the accessed bit before
+             * writing the return address to the stack and committing the result into
+             * CS, CSHID and RIP.
+             */
+            /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
+            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+            {
+                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+                if (rcStrict != VINF_SUCCESS)
+                    return rcStrict;
+                /** @todo check what VT-x and AMD-V does. */
+                DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+            }
+
+            /* stack */
+            if (!IEM_IS_LONG_MODE(pIemCpu))
+            {
+                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
+                {
+                    uPtrRet.pu32[0] = pCtx->eip + cbInstr;
+                    uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
+                }
+                else
+                {
+                    Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
+                    uPtrRet.pu16[0] = pCtx->ip + cbInstr;
+                    uPtrRet.pu16[1] = pCtx->cs.Sel;
+                }
+            }
+            else
+            {
+                Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
+                uPtrRet.pu64[0] = pCtx->rip + cbInstr;
+                uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
+            }
+
+            rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+
+            /* commit */
+            pCtx->rip         = uNewRip;
+            pCtx->cs.Sel      = uNewCS & X86_SEL_MASK_OFF_RPL;
+            pCtx->cs.Sel     |= pIemCpu->uCpl;
+            pCtx->cs.ValidSel = pCtx->cs.Sel;
+            pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
+            pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+            pCtx->cs.u32Limit = cbLimit;
+            pCtx->cs.u64Base  = u64Base;
+        }
+    }
+    pCtx->eflags.Bits.u1RF = 0;
+    return VINF_SUCCESS;
+#endif
 }
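
For readers unfamiliar with call gates: before any TSS lookup or stack switch, the new code applies the x86 gate privilege rules up front. The following sketch is illustrative only — hypothetical helper name, not part of the changeset or of VBox — and condenses the checks performed above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper modelling the DPL checks iemCImpl_BranchCallGate
     * makes before it touches the TSS or any stack. */
    static bool callGatePrivilegeOk(uint8_t uCpl, uint8_t uRpl, uint8_t uGateDpl,
                                    uint8_t uCsDpl, bool fConforming, bool fCall)
    {
        /* Gate reachability: max(CPL, RPL) <= gate DPL, else #GP(selector). */
        if (uGateDpl < uCpl || uGateDpl < uRpl)
            return false;

        if (fCall)
            return uCsDpl <= uCpl;          /* calls may go to equal or greater privilege */

        /* Jumps must stay at the current privilege level. */
        return fConforming ? uCsDpl <= uCpl /* conforming: DPL <= CPL suffices   */
                           : uCsDpl == uCpl;/* non-conforming: DPL must equal CPL */
    }

For example, with CPL=3 and RPL=3, a DPL=3 gate whose target CS has DPL=0 passes for a far call (callGatePrivilegeOk(3, 3, 3, 0, false, true) is true); the more-privileged path in the changeset then fetches the inner SS:rSP from the TSS and copies the parameter words to the new stack.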