Timestamp: Feb 1, 2012, 9:17:34 PM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     2 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r39402 → r39945)
…
 static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
 static VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
+static VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
 static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
 static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
…
 static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
 static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
+static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
+static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
 static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
 static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
…
     uint32_t u32Tmp;
-    rcStrict = iemMemFetchDataU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
+    rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
     if (rcStrict == VINF_SUCCESS)
     {
…
     uint64_t u64Tmp;
-    rcStrict = iemMemFetchDataU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
+    rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->trHid.u64Base + off);
     if (rcStrict == VINF_SUCCESS)
     {
…
     }
     X86DESC Idte;
-    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &Idte.u, UINT8_MAX,
-                                               pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
+    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
+                                              pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
         return rcStrict;
…
     RTPTRUNION uStackFrame;
     rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                         uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W);
+                         uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     uStackFrame.pu32[3] = pCtx->esp;
     uStackFrame.pu32[4] = pCtx->ss;
-    rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W);
+    rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     /** @todo double and tripple faults. */
-    AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_NOT_IMPLEMENTED);
+    AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);

     /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
…

+/** \#SS(seg) - 0c. */
+DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
+{
+    return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
+                             uSel & ~X86_SEL_RPL, 0);
+}
+
+
 /** \#GP(n) - 0d. */
 DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)
…
 {
     NOREF(iSegReg); NOREF(fAccess);
-    return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
+    return iemRaiseXcptOrInt(pIemCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
+                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
 }
…
-    if (   (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)
-        && (   (   (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read only memory? */
-                && !(fFlags & X86_PTE_RW)
-                && (   pIemCpu->uCpl != 0
-                    || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )
-            || (   !(fFlags & X86_PTE_US) /* Kernel memory */
-                && pIemCpu->uCpl == 3)
-            || (   (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */
-                && (fFlags & X86_PTE_PAE_NX)
-                && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
-           )
-       )
-    {
-        *pGCPhysMem = NIL_RTGCPHYS;
-        return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
+    /* If the page is writable and does not have the no-exec bit set, all
+       access is allowed.  Otherwise we'll have to check more carefully... */
+    if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
+    {
+        /* Write to read only memory? */
+        if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
+            && !(fFlags & X86_PTE_RW)
+            && (   pIemCpu->uCpl != 0
+                || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)))
+        {
+            Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page\n", GCPtrMem));
+            *pGCPhysMem = NIL_RTGCPHYS;
+            return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
+        }
+
+        /* Kernel memory accessed by userland? */
+        if (   !(fFlags & X86_PTE_US)
+            && pIemCpu->uCpl == 3
+            && !(fAccess & IEM_ACCESS_WHAT_SYS))
+        {
+            Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page\n", GCPtrMem));
+            *pGCPhysMem = NIL_RTGCPHYS;
+            return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
+        }
+
+        /* Executing non-executable memory? */
+        if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
+            && (fFlags & X86_PTE_PAE_NX)
+            && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )
+        {
+            Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX\n", GCPtrMem));
+            *pGCPhysMem = NIL_RTGCPHYS;
+            return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
+        }
     }
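The restructured check above is the behavioral heart of this changeset: a CPL-3 access to a supervisor page no longer faults unconditionally, because descriptor-table, IDT and TSS reads done on the guest's behalf now carry the new IEM_ACCESS_WHAT_SYS flag. Below is a minimal standalone sketch of the same decision logic, with made-up flag names and bit positions rather than the real IEM/VBox definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_RW        0x02u   /* page is writable */
#define PTE_US        0x04u   /* page is user-accessible */
#define PTE_NX        0x80u   /* no-execute (real bit is 63; toy value) */

#define ACC_WRITE     0x01u   /* access is a write */
#define ACC_EXEC      0x02u   /* access is an instruction fetch */
#define ACC_WHAT_SYS  0x10u   /* system-table access, the new exemption */

/* Returns true if the access should raise #PF, mirroring the rewritten
   check in iemMemPageTranslateAndCheckAccess. */
static bool accessFaults(uint32_t fPte, uint32_t fAccess, uint8_t uCpl,
                         bool fCr0Wp, bool fEferNxe)
{
    /* Fast path: a user-writable page without NX allows everything. */
    if ((fPte & (PTE_RW | PTE_US | PTE_NX)) == (PTE_RW | PTE_US))
        return false;

    /* Write to a read-only page?  Ring 0 may do so unless CR0.WP is set. */
    if ((fAccess & ACC_WRITE) && !(fPte & PTE_RW) && (uCpl != 0 || fCr0Wp))
        return true;

    /* User-mode access to a supervisor page?  System-table accesses are
       exempt; the CPU reads GDT/LDT/IDT/TSS with supervisor rights. */
    if (!(fPte & PTE_US) && uCpl == 3 && !(fAccess & ACC_WHAT_SYS))
        return true;

    /* Instruction fetch from a no-execute page (when EFER.NXE is on)? */
    if ((fAccess & ACC_EXEC) && (fPte & PTE_NX) && fEferNxe)
        return true;

    return false;
}

int main(void)
{
    /* CPL-3 descriptor fetch from a supervisor page: tolerated (prints 0). */
    printf("%d\n", accessFaults(PTE_RW, ACC_WHAT_SYS, 3, true, true));
    /* Plain CPL-3 data read from the same page: faults (prints 1). */
    printf("%d\n", accessFaults(PTE_RW, 0, 3, true, true));
    return 0;
}

The fast path mirrors the new outer if: a user-writable page without NX can never fault on permissions, so the detailed checks are skipped entirely.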
…
  * Begin a special stack pop (used by iret, retf and such).
  *
- * This will raise #SS or #PF if appropriate.
+ * This will raise \#SS or \#PF if appropriate.
  *
  * @returns Strict VBox status code.
…
  * @param   puNewRsp            Where to return the new RSP value.  This must be
  *                              passed unchanged to
- *                              iemMemStackPopCommitSpecial().
+ *                              iemMemStackPopCommitSpecial() or applied
+ *                              manually if iemMemStackPopDoneSpecial() is used.
  */
 static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
…
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
     RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);
+    return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+}
+
+
+/**
+ * Continue a special stack pop (used by iret).
+ *
+ * This will raise \#SS or \#PF if appropriate.
+ *
+ * @returns Strict VBox status code.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   cbMem               The number of bytes to push onto the stack.
+ * @param   ppvMem              Where to return the pointer to the stack memory.
+ * @param   puNewRsp            Where to return the new RSP value.  This must be
+ *                              passed unchanged to
+ *                              iemMemStackPopCommitSpecial() or applied
+ *                              manually if iemMemStackPopDoneSpecial() is used.
+ */
+static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
+{
+    Assert(cbMem < UINT8_MAX);
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+    RTUINT64U NewRsp;
+    NewRsp.u = *puNewRsp;
+    RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);
+    *puNewRsp = NewRsp.u;
     return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
 }
…
     pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;
     return rcStrict;
+}
+
+
+/**
+ * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
+ * iemMemStackPopContinueSpecial).
+ *
+ * The caller will manually commit the rSP.
+ *
+ * @returns Strict VBox status code.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pvMem               The pointer returned by
+ *                              iemMemStackPopBeginSpecial() or
+ *                              iemMemStackPopContinueSpecial().
+ */
+static VBOXSTRICTRC iemMemStackPopDoneSpecial(PIEMCPU pIemCpu, void const *pvMem)
+{
+    return iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
+}
+
+
+/**
+ * Fetches a system table dword.
+ *
+ * @returns Strict VBox status code.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu32Dst             Where to return the dword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint32_t const *pu32Src;
+    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    if (rc == VINF_SUCCESS)
+    {
+        *pu32Dst = *pu32Src;
+        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
+    }
+    return rc;
+}
+
+
+/**
+ * Fetches a system table qword.
+ *
+ * @returns Strict VBox status code.
+ * @param   pIemCpu             The IEM per CPU data.
+ * @param   pu64Dst             Where to return the qword.
+ * @param   iSegReg             The index of the segment register to use for
+ *                              this access.  The base and limits are checked.
+ * @param   GCPtrMem            The address of the guest memory.
+ */
+static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
+{
+    /* The lazy approach for now... */
+    uint64_t const *pu64Src;
+    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    if (rc == VINF_SUCCESS)
+    {
+        *pu64Dst = *pu64Src;
+        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
+    }
+    return rc;
 }
…
      * required.
      */
-    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
+    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
     if (rcStrict == VINF_SUCCESS)
     {
…
         pDesc->Long.au64[1] = 0;
     else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
-        rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
+        rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
     else
     {
…
         /* The normal case, map the 32-bit bits around the accessed bit (40). */
         GCPtr += 2 + 2;
-        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
+        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     {
         /* The misaligned GDT/LDT case, map the whole thing. */
-        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
+        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
…
     if (cDiffs != 0)
     {
+        if (LogIs3Enabled())
+            DBGFR3Info(pVM, "cpumguest", "verbose", NULL);
         RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
         iemVerifyAssertMsg2(pIemCpu);
…
                 (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
                 szInstr));
+
+        if (LogIs3Enabled())
+            DBGFR3Info(pVCpu->pVMR3, "cpumguest", "verbose", NULL);
     }
 #endif
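The Begin/Continue/Done trio added above exists so that iret can pull the whole return frame off the stack, including the outer SS:ESP words, before committing anything to guest state. A toy model of that protocol (flat in-memory stack, invented types, faults ignored), illustrating why the provisional RSP rides along in a local until the caller decides how to commit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t abStack[64]; uint64_t uRsp; } TOYCPU;

/* "Begin": compute the post-pop RSP, return a pointer to the popped data.
   The CPU's real RSP is not touched yet. */
static const void *popBegin(TOYCPU *pCpu, size_t cb, uint64_t *puNewRsp)
{
    *puNewRsp = pCpu->uRsp + cb;          /* a pop moves the stack pointer up */
    return &pCpu->abStack[pCpu->uRsp];
}

/* "Continue": pop more, relative to the still-uncommitted RSP. */
static const void *popContinue(TOYCPU *pCpu, size_t cb, uint64_t *puNewRsp)
{
    const void *pv = &pCpu->abStack[*puNewRsp];
    *puNewRsp += cb;
    return pv;
}

int main(void)
{
    TOYCPU Cpu = { .uRsp = 0 };
    uint32_t au32Frame[5] = { 0x1000, 0x08, 0x202, 0x2000, 0x23 }; /* EIP CS EFL ESP SS */
    memcpy(Cpu.abStack, au32Frame, sizeof(au32Frame));

    uint64_t uNewRsp;
    const uint32_t *pu32 = popBegin(&Cpu, 12, &uNewRsp);   /* EIP, CS, EFLAGS */
    printf("eip=%#x cs=%#x efl=%#x\n", (unsigned)pu32[0], (unsigned)pu32[1], (unsigned)pu32[2]);
    pu32 = popContinue(&Cpu, 8, &uNewRsp);                 /* outer ESP, SS */
    printf("esp=%#x ss=%#x\n", (unsigned)pu32[0], (unsigned)pu32[1]);
    /* "Done": only now is a stack pointer committed -- for an inter-level
       iret the outer ESP is loaded instead of the popped-past uNewRsp. */
    Cpu.uRsp = pu32[0];
    return 0;
}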
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r39125 → r39945)
…

+/**
+ * Loads a NULL data selector into a selector register, both the hidden and
+ * visible parts, in protected mode.
+ *
+ * @param   puSel               The selector register.
+ * @param   pHid                The hidden register part.
+ */
+static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
+{
+    /** @todo write a testcase checking what happends when loading a NULL data
+     *        selector in protected mode. */
+    pHid->u64Base  = 0;
+    pHid->u32Limit = 0;
+    pHid->Attr.u   = 0;
+    *puSel = 0;
+}
+
+
+/**
+ * Helper used by iret.
+ *
+ * @param   uCpl                The new CPL.
+ * @param   puSel               The selector register.
+ * @param   pHid                The corresponding hidden register.
+ */
+static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
+{
+    if (   uCpl > pHid->Attr.n.u2Dpl
+        && pHid->Attr.n.u1DescType /* code or data, not system */
+        &&    (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
+           != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
+        iemHlpLoadNullDataSelectorProt(puSel, pHid);
+}
+
+
 /** @} */
…
     rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS);
     if (rcStrict != VINF_SUCCESS)
+    {
+        Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCS, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
         return rcStrict;
+    }

     /* Must be a code descriptor. */
…
     /*
-     * Different level?
+     * Return to outer level?
      */
     if ((uNewCS & X86_SEL_RPL) != pIemCpu->uCpl)
     {
-        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
+        uint16_t    uNewSS;
+        uint32_t    uNewESP;
+        if (enmEffOpSize == IEMMODE_32BIT)
+        {
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            uNewESP = uFrame.pu32[0];
+            uNewSS  = (uint16_t)uFrame.pu32[1];
+        }
+        else
+        {
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            uNewESP = uFrame.pu16[0];
+            uNewSS  = uFrame.pu16[1];
+        }
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+
+        /* Read the SS descriptor. */
+        if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFault0(pIemCpu);
+        }
+
+        IEMSELDESC DescSS;
+        rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
+        if (rcStrict != VINF_SUCCESS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
+                 uNewCS, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+
+        /* Privilege checks. */
+        if ((uNewSS & X86_SEL_RPL) != (uNewCS & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if (DescSS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
+                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Must be a writeable data segment descriptor. */
+        if (!DescSS.Legacy.Gen.u1DescType)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
+                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
+                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Present? */
+        if (!DescSS.Legacy.Gen.u1Present)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
+        }
+
+        uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
+        if (DescSS.Legacy.Gen.u1Granularity)
+            cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+        /* Check EIP. */
+        if (uNewEip > cbLimitCS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
+                 uNewCS, uNewEip, uNewSS, uNewESP, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
+        }
+
+        /*
+         * Commit the changes, marking CS and SS accessed first since
+         * that may fail.
+         */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        pCtx->rip            = uNewEip;
+        pCtx->cs             = uNewCS;
+        pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+        pCtx->csHid.u32Limit = cbLimitCS;
+        pCtx->csHid.u64Base  = X86DESC_BASE(DescCS.Legacy);
+        pCtx->rsp            = uNewESP;
+        pCtx->ss             = uNewSS;
+        pCtx->ssHid.Attr.u   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+        pCtx->ssHid.u32Limit = cbLimitSS;
+        pCtx->ssHid.u64Base  = X86DESC_BASE(DescSS.Legacy);
+
+        uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                             | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+        if (enmEffOpSize != IEMMODE_16BIT)
+            fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+        if (pIemCpu->uCpl == 0)
+            fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+        else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+            fEFlagsMask |= X86_EFL_IF;
+        pCtx->eflags.u &= ~fEFlagsMask;
+        pCtx->eflags.u |= fEFlagsMask & uNewFlags;
+
+        pIemCpu->uCpl = uNewCS & X86_SEL_RPL;
+        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
+
+        /* Done! */
+
     }
     /*
-     * Same level.
+     * Return to the same level.
      */
     else
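A subtle detail in the new outer-level path is the EFLAGS restore mask: which bits iret may actually change depends on the operand size and on the current CPL relative to IOPL. A standalone sketch of that computation, with the architectural flag bits spelled out locally instead of pulling in the X86_EFL_* headers:

#include <stdint.h>
#include <stdio.h>

#define EFL_CF   (1u << 0)
#define EFL_PF   (1u << 2)
#define EFL_AF   (1u << 4)
#define EFL_ZF   (1u << 6)
#define EFL_SF   (1u << 7)
#define EFL_TF   (1u << 8)
#define EFL_IF   (1u << 9)
#define EFL_DF   (1u << 10)
#define EFL_OF   (1u << 11)
#define EFL_IOPL (3u << 12)
#define EFL_NT   (1u << 14)
#define EFL_RF   (1u << 16)
#define EFL_AC   (1u << 18)
#define EFL_VIF  (1u << 19)
#define EFL_VIP  (1u << 20)
#define EFL_ID   (1u << 21)

/* Which EFLAGS bits may this iret restore from the popped value? */
static uint32_t iretEflMask(uint8_t uCpl, uint8_t uIopl, int f16BitOpSize)
{
    uint32_t fMask = EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF
                   | EFL_TF | EFL_DF | EFL_OF | EFL_NT;    /* always restorable */
    if (!f16BitOpSize)
        fMask |= EFL_RF | EFL_AC | EFL_ID;                 /* 32-bit frames only */
    if (uCpl == 0)
        fMask |= EFL_IF | EFL_IOPL | EFL_VIF | EFL_VIP;    /* ring 0; VM stays 0 */
    else if (uCpl <= uIopl)
        fMask |= EFL_IF;                                   /* IF needs CPL <= IOPL */
    return fMask;
}

int main(void)
{
    printf("ring0, 32-bit: %#x\n", (unsigned)iretEflMask(0, 0, 0));
    printf("ring3, IOPL=0: %#x\n", (unsigned)iretEflMask(3, 0, 0));
    return 0;
}

So a ring-0 iret may rewrite IF and IOPL, while a ring-3 iret under IOPL 0 can touch neither; this is exactly the masking the committed code applies before OR-ing in uNewFlags.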