Changeset 37079 in vbox for trunk/src/VBox
- Timestamp: May 13, 2011 3:35:03 PM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r37061 → r37079)

@@ -45 +45 @@
 *   Header Files                                                              *
 *******************************************************************************/
-#define LOG_GROUP LOG_GROUP_EM /** @todo add log group */
+#define LOG_GROUP LOG_GROUP_IEM
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/pgm.h>

@@ -69 +69 @@
 *   Structures and Typedefs                                                   *
 *******************************************************************************/
+/**
+ * Generic pointer union.
+ * @todo move me to iprt/types.h
+ */
+typedef union RTPTRUNION
+{
+    /** Pointer into the void... */
+    void        *pv;
+    /** Pointer to a 8-bit unsigned value. */
+    uint8_t     *pu8;
+    /** Pointer to a 16-bit unsigned value. */
+    uint16_t    *pu16;
+    /** Pointer to a 32-bit unsigned value. */
+    uint32_t    *pu32;
+    /** Pointer to a 64-bit unsigned value. */
+    uint64_t    *pu64;
+} RTPTRUNION;
+/** Pointer to a pointer union. */
+typedef RTPTRUNION *PRTPTRUNION;
+
+/**
+ * Generic const pointer union.
+ * @todo move me to iprt/types.h
+ */
+typedef union RTCPTRUNION
+{
+    /** Pointer into the void... */
+    void const      *pv;
+    /** Pointer to a 8-bit unsigned value. */
+    uint8_t const   *pu8;
+    /** Pointer to a 16-bit unsigned value. */
+    uint16_t const  *pu16;
+    /** Pointer to a 32-bit unsigned value. */
+    uint32_t const  *pu32;
+    /** Pointer to a 64-bit unsigned value. */
+    uint64_t const  *pu64;
+} RTCPTRUNION;
+/** Pointer to a const pointer union. */
+typedef RTCPTRUNION *PRTCPTRUNION;
+
 /** @typedef PFNIEMOP
  * Pointer to an opcode decoder function.

@@ -139 +179 @@
 *   Defined Constants And Macros                                              *
 *******************************************************************************/
+/** @name IEM status codes.
+ *
+ * Not quite sure how this will play out in the end, just aliasing safe status
+ * codes for now.
+ *
+ * @{ */
+#define VINF_IEM_RAISED_XCPT    VINF_EM_RESCHEDULE
+/** @} */
+
 /** Temporary hack to disable the double execution.  Will be removed in favor
  * of a dedicated execution mode in EM. */

@@ -545 +594 @@
 static VBOXSTRICTRC     iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
 static VBOXSTRICTRC     iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
+static VBOXSTRICTRC     iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
 static VBOXSTRICTRC     iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
 static VBOXSTRICTRC     iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
+static VBOXSTRICTRC     iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
+static VBOXSTRICTRC     iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
 static VBOXSTRICTRC     iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
 static VBOXSTRICTRC     iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);

@@ -552 +604 @@
 static VBOXSTRICTRC     iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
 static VBOXSTRICTRC     iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
+static VBOXSTRICTRC     iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);

 #ifdef IEM_VERIFICATION_MODE

@@ -1469 +1522 @@
     iemRaiseXcptAdjustState(pCtx, u8Vector);

-    return VINF_SUCCESS;
+    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
 }

@@ -1495 +1548 @@
                                          uint64_t  uCr2)
 {
-    Log(("iemRaiseXcptOrIntInProtMode: %#x at %04x:%08RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
-         u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
-
     /*
      * Read the IDT entry.

@@ -1619 +1669 @@
                       ? Idte.Gate.u16OffsetLow
                       : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
-    uint32_t cbLimit = X86DESC_LIMIT(DescCS.Legacy);
+    uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
     if (DescCS.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-    if (uNewEip > X86DESC_LIMIT(DescCS.Legacy))
+        cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    if (uNewEip > cbLimitCS)
     {
         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",

@@ -1642 +1692 @@
     uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
                           ? pIemCpu->uCpl : DescCS.Legacy.Gen.u2Dpl;
-    uint32_t uNewEsp;
-    RTSEL    NewSS;
-    uint32_t fNewSSAttr;
-    uint32_t cbNewSSLimit;
-    uint64_t uNewSSBase;
-
     if (uNewCpl != pIemCpu->uCpl)
     {
+        RTSEL    NewSS;
+        uint32_t uNewEsp;
         rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
+
         IEMSELDESC DescSS;
         rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;

-        fNewSSAttr   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
-        cbNewSSLimit = X86DESC_LIMIT(DescSS.Legacy);
+        /* Check that there is sufficient space for the stack frame. */
+        uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
         if (DescSS.Legacy.Gen.u1Granularity)
-            cbNewSSLimit = (cbNewSSLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-        uNewSSBase = X86DESC_BASE(DescSS.Legacy);
-    }
+            cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+        AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_NOT_IMPLEMENTED);
+
+        uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
+        if (   uNewEsp - 1 > cbLimitSS
+            || uNewEsp < cbStackFrame)
+        {
+            Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
+                 u8Vector, NewSS, uNewEsp, cbStackFrame));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
+        }
+
+        /*
+         * Start making changes.
+         */
+
+        /* Create the stack frame. */
+        RTPTRUNION uStackFrame;
+        rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
+                             uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        void * const pvStackFrame = uStackFrame.pv;
+
+        if (fFlags & IEM_XCPT_FLAGS_ERR)
+            *uStackFrame.pu32++ = uErr;
+        uStackFrame.pu32[0] = pCtx->eip;
+        uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
+        uStackFrame.pu32[2] = pCtx->eflags.u;
+        uStackFrame.pu32[3] = pCtx->esp;
+        uStackFrame.pu32[4] = pCtx->ss;
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+
+        /* Mark the selectors 'accessed' (hope this is the correct time). */
+        /** @todo testcase: excatly _when_ are the accessed bits set - before or
+         *        after pushing the stack frame? (Write protect the gdt + stack to
+         *        find out.) */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        /*
+         * Start commint the register changes (joins with the DPL=CPL branch).
+         */
+        pCtx->ss             = NewSS;
+        pCtx->ssHid.u32Limit = cbLimitSS;
+        pCtx->ssHid.u64Base  = X86DESC_BASE(DescSS.Legacy);
+        pCtx->ssHid.Attr.u   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+        pCtx->rsp            = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
+        pIemCpu->uCpl        = uNewCpl;
+    }
+    /*
+     * Same privilege, no stack change and smaller stack frame.
+     */
     else
     {
-        uNewEsp      = pCtx->esp;
-        NewSS        = pCtx->ss;
-        fNewSSAttr   = pCtx->ssHid.Attr.u;
-        cbNewSSLimit = pCtx->ssHid.u32Limit;
-        uNewSSBase   = pCtx->ssHid.u64Base;
-    }
-
-    /*
-     * Check if we have the space for the stack frame.
-     */
-
-
-    /*
-     * Set the CS and maybe SS accessed bits.
-     */
-    /** @todo testcase: excatly when is the accessed bit set, before or after
-     *        pushing the stack frame. (write protect the gdt + stack to find
-     *        out). */
-
-
-    return VERR_NOT_IMPLEMENTED;
+        uint64_t      uNewRsp;
+        RTPTRUNION    uStackFrame;
+        uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
+        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        void * const pvStackFrame = uStackFrame.pv;
+
+        if (fFlags & IEM_XCPT_FLAGS_ERR)
+            *uStackFrame.pu32++ = uErr;
+        uStackFrame.pu32[0] = pCtx->eip;
+        uStackFrame.pu32[1] = (pCtx->cs & ~X86_SEL_RPL) | pIemCpu->uCpl;
+        uStackFrame.pu32[2] = pCtx->eflags.u;
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+
+        /* Mark the CS selector as 'accessed'. */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        /*
+         * Start committing the register changes (joins with the other branch).
+         */
+        pCtx->rsp = uNewRsp;
+    }
+
+    /* ... register commiting continues. */
+    pCtx->cs             = (NewCS & ~X86_SEL_RPL) | uNewCpl;
+    pCtx->csHid.u32Limit = cbLimitCS;
+    pCtx->csHid.u64Base  = X86DESC_BASE(DescCS.Legacy);
+    pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+
+    pCtx->rip            = uNewEip;
+    pCtx->rflags.u      &= ~fEflToClear;
+
+    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
 }

@@ -1766 +1899 @@
                             uint64_t    uCr2)
 {
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+
     /*
      * Do recursion accounting.
      */
-    uint8_t uPrevXcpt = pIemCpu->uCurXcpt;
-    if (pIemCpu->cXcptRecursions > 0)
-    {
+    uint8_t const uPrevXcpt = pIemCpu->uCurXcpt;
+    if (pIemCpu->cXcptRecursions == 0)
+        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
+             u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
+    else
+    {
+        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d\n",
+             u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1));
+
         /** @todo double and tripple faults. */
         AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_NOT_IMPLEMENTED);

@@ -1779 +1920 @@

     /*
-     * Call mode specific worker function.
+     * Call the mode specific worker function.
      */
     VBOXSTRICTRC rcStrict;
-    PCPUMCTX     pCtx = pIemCpu->CTX_SUFF(pCtx);
     if (!(pCtx->cr0 & X86_CR0_PE))
         rcStrict = iemRaiseXcptOrIntInRealMode( pIemCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);

@@ -1797 +1937 @@
     pIemCpu->cXcptRecursions--;
     pIemCpu->uCurXcpt = uPrevXcpt;
+    Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv\n",
+         VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs, pCtx->rip, pCtx->ss, pCtx->esp));
     return rcStrict;
 }

@@ -1904 +2046 @@
 /** \#GP(sel) - 0d. */
 static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
+{
+    AssertFailed(/** @todo implement this */);
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/** \#GP(sel) - 0d. */
+static VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel)
 {
     AssertFailed(/** @todo implement this */);

@@ -3061 +3211 @@
     AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);

-    AssertFailed(); /** @todo implement me. */
-    return 1024;
-
+    for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
+        if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
+            return i;
+
+    AssertFailedReturn(1024);
 }

@@ -4781 +4933 @@
 #ifdef DEBUG
 # define IEMOP_MNEMONIC(a_szMnemonic) \
-    Log2(("decode - %04x:%08RGv %s%s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
+    Log2(("decode - %04x:%RGv %s%s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
          pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic))
 # define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
-    Log2(("decode - %04x:%08RGv %s%s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
+    Log2(("decode - %04x:%RGv %s%s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
          pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps))
 #else

@@ -5142 +5294 @@
 static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
 {
+    PVMCPU   pVCpu   = IEMCPU_TO_VMCPU(pIemCpu);
     PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
-    pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */
+    pIemCpu->fNoRem  = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */

 #if 0

@@ -5159 +5312 @@
     }
 #endif
+#if 0 /* auto enable on first paged protected mode interrupt */
+    if (   pIemCpu->fNoRem
+        && pOrgCtx->eflags.Bits.u1IF
+        && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
+        && TRPMHasTrap(pVCpu)
+        && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
+    {
+        RTLogFlags(NULL, "enabled");
+        pIemCpu->fNoRem = false;
+    }
+#endif

     /*

@@ -5174 +5338 @@
      * See if there is an interrupt pending in TRPM and inject it if we can.
      */
-    PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
     if (   pOrgCtx->eflags.Bits.u1IF
         && TRPMHasTrap(pVCpu)

@@ -5635 +5798 @@
     if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
     {
-        if (pIemCpu->cInstructions != 1)
-        {
-            RTAssertMsg2Weak("  the FPU state differs\n");
-            cDiffs++;
-            CHECK_FIELD(fpu.FCW);
-            CHECK_FIELD(fpu.FSW);
-            CHECK_FIELD(fpu.FTW);
-            CHECK_FIELD(fpu.FOP);
-            CHECK_FIELD(fpu.FPUIP);
-            CHECK_FIELD(fpu.CS);
-            CHECK_FIELD(fpu.Rsrvd1);
-            CHECK_FIELD(fpu.FPUDP);
-            CHECK_FIELD(fpu.DS);
-            CHECK_FIELD(fpu.Rsrvd2);
-            CHECK_FIELD(fpu.MXCSR);
-            CHECK_FIELD(fpu.MXCSR_MASK);
-            CHECK_FIELD(fpu.aRegs[0].au64[0]);  CHECK_FIELD(fpu.aRegs[0].au64[1]);
-            CHECK_FIELD(fpu.aRegs[1].au64[0]);  CHECK_FIELD(fpu.aRegs[1].au64[1]);
-            CHECK_FIELD(fpu.aRegs[2].au64[0]);  CHECK_FIELD(fpu.aRegs[2].au64[1]);
-            CHECK_FIELD(fpu.aRegs[3].au64[0]);  CHECK_FIELD(fpu.aRegs[3].au64[1]);
-            CHECK_FIELD(fpu.aRegs[4].au64[0]);  CHECK_FIELD(fpu.aRegs[4].au64[1]);
-            CHECK_FIELD(fpu.aRegs[5].au64[0]);  CHECK_FIELD(fpu.aRegs[5].au64[1]);
-            CHECK_FIELD(fpu.aRegs[6].au64[0]);  CHECK_FIELD(fpu.aRegs[6].au64[1]);
-            CHECK_FIELD(fpu.aRegs[7].au64[0]);  CHECK_FIELD(fpu.aRegs[7].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 0].au64[0]);  CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 1].au64[0]);  CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 2].au64[0]);  CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 3].au64[0]);  CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 4].au64[0]);  CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 5].au64[0]);  CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 6].au64[0]);  CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 7].au64[0]);  CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 8].au64[0]);  CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
-            CHECK_FIELD(fpu.aXMM[ 9].au64[0]);  CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
-            CHECK_FIELD(fpu.aXMM[10].au64[0]);  CHECK_FIELD(fpu.aXMM[10].au64[1]);
-            CHECK_FIELD(fpu.aXMM[11].au64[0]);  CHECK_FIELD(fpu.aXMM[11].au64[1]);
-            CHECK_FIELD(fpu.aXMM[12].au64[0]);  CHECK_FIELD(fpu.aXMM[12].au64[1]);
-            CHECK_FIELD(fpu.aXMM[13].au64[0]);  CHECK_FIELD(fpu.aXMM[13].au64[1]);
-            CHECK_FIELD(fpu.aXMM[14].au64[0]);  CHECK_FIELD(fpu.aXMM[14].au64[1]);
-            CHECK_FIELD(fpu.aXMM[15].au64[0]);  CHECK_FIELD(fpu.aXMM[15].au64[1]);
-            for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
-                CHECK_FIELD(fpu.au32RsrvdRest[i]);
-        }
-        else
-            RTAssertMsg2Weak("  the FPU state differs - happens the first time...\n");
+        RTAssertMsg2Weak("  the FPU state differs\n");
+        cDiffs++;
+        CHECK_FIELD(fpu.FCW);
+        CHECK_FIELD(fpu.FSW);
+        CHECK_FIELD(fpu.FTW);
+        CHECK_FIELD(fpu.FOP);
+        CHECK_FIELD(fpu.FPUIP);
+        CHECK_FIELD(fpu.CS);
+        CHECK_FIELD(fpu.Rsrvd1);
+        CHECK_FIELD(fpu.FPUDP);
+        CHECK_FIELD(fpu.DS);
+        CHECK_FIELD(fpu.Rsrvd2);
+        CHECK_FIELD(fpu.MXCSR);
+        CHECK_FIELD(fpu.MXCSR_MASK);
+        CHECK_FIELD(fpu.aRegs[0].au64[0]);  CHECK_FIELD(fpu.aRegs[0].au64[1]);
+        CHECK_FIELD(fpu.aRegs[1].au64[0]);  CHECK_FIELD(fpu.aRegs[1].au64[1]);
+        CHECK_FIELD(fpu.aRegs[2].au64[0]);  CHECK_FIELD(fpu.aRegs[2].au64[1]);
+        CHECK_FIELD(fpu.aRegs[3].au64[0]);  CHECK_FIELD(fpu.aRegs[3].au64[1]);
+        CHECK_FIELD(fpu.aRegs[4].au64[0]);  CHECK_FIELD(fpu.aRegs[4].au64[1]);
+        CHECK_FIELD(fpu.aRegs[5].au64[0]);  CHECK_FIELD(fpu.aRegs[5].au64[1]);
+        CHECK_FIELD(fpu.aRegs[6].au64[0]);  CHECK_FIELD(fpu.aRegs[6].au64[1]);
+        CHECK_FIELD(fpu.aRegs[7].au64[0]);  CHECK_FIELD(fpu.aRegs[7].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 0].au64[0]);  CHECK_FIELD(fpu.aXMM[ 0].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 1].au64[0]);  CHECK_FIELD(fpu.aXMM[ 1].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 2].au64[0]);  CHECK_FIELD(fpu.aXMM[ 2].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 3].au64[0]);  CHECK_FIELD(fpu.aXMM[ 3].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 4].au64[0]);  CHECK_FIELD(fpu.aXMM[ 4].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 5].au64[0]);  CHECK_FIELD(fpu.aXMM[ 5].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 6].au64[0]);  CHECK_FIELD(fpu.aXMM[ 6].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 7].au64[0]);  CHECK_FIELD(fpu.aXMM[ 7].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 8].au64[0]);  CHECK_FIELD(fpu.aXMM[ 8].au64[1]);
+        CHECK_FIELD(fpu.aXMM[ 9].au64[0]);  CHECK_FIELD(fpu.aXMM[ 9].au64[1]);
+        CHECK_FIELD(fpu.aXMM[10].au64[0]);  CHECK_FIELD(fpu.aXMM[10].au64[1]);
+        CHECK_FIELD(fpu.aXMM[11].au64[0]);  CHECK_FIELD(fpu.aXMM[11].au64[1]);
+        CHECK_FIELD(fpu.aXMM[12].au64[0]);  CHECK_FIELD(fpu.aXMM[12].au64[1]);
+        CHECK_FIELD(fpu.aXMM[13].au64[0]);  CHECK_FIELD(fpu.aXMM[13].au64[1]);
+        CHECK_FIELD(fpu.aXMM[14].au64[0]);  CHECK_FIELD(fpu.aXMM[14].au64[1]);
+        CHECK_FIELD(fpu.aXMM[15].au64[0]);  CHECK_FIELD(fpu.aXMM[15].au64[1]);
+        for (unsigned i = 0; i < RT_ELEMENTS(pOrgCtx->fpu.au32RsrvdRest); i++)
+            CHECK_FIELD(fpu.au32RsrvdRest[i]);
     }
     CHECK_FIELD(rip);

@@ -5709 +5867 @@
     }

-    if (pIemCpu->cIOReads != 1)
+    if (pIemCpu->cIOReads != 1 && !pIemCpu->fIgnoreRaxRdx)
         CHECK_FIELD(rax);
     CHECK_FIELD(rcx);
-    CHECK_FIELD(rdx);
+    if (!pIemCpu->fIgnoreRaxRdx)
+        CHECK_FIELD(rdx);
     CHECK_FIELD(rbx);
     CHECK_FIELD(rsp);
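The RTPTRUNION type added above is what lets the new exception dispatch code build a stack frame with mixed-width stores through one cursor (*uStackFrame.pu32++ = uErr; followed by indexed stores). A minimal standalone sketch of that pattern, using a made-up SKETCHPTRUNION stand-in rather than the real iprt type:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for RTPTRUNION; only the pattern is the point. */
    typedef union SKETCHPTRUNION
    {
        void     *pv;
        uint8_t  *pu8;
        uint16_t *pu16;
        uint32_t *pu32;
        uint64_t *pu64;
    } SKETCHPTRUNION;

    int main(void)
    {
        uint32_t au32Frame[4] = {0};
        SKETCHPTRUNION uPtr;
        uPtr.pv = au32Frame;

        *uPtr.pu32++ = 0x10;        /* optional error code; advances the cursor */
        uPtr.pu32[0] = 0x00401000;  /* EIP slot */
        uPtr.pu32[1] = 0x0008;      /* CS slot */
        uPtr.pu32[2] = 0x00000202;  /* EFLAGS slot */

        for (unsigned i = 0; i < 4; i++)
            printf("slot %u: %#010x\n", i, au32Frame[i]);
        return 0;
    }

The post-increment on the pu32 member advances the shared pointer value, so the indexed stores that follow land after the optional error code, exactly as in the interrupt code above.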
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r37058 → r37079)

@@ -955 +955 @@
         return rcStrict;
     uint32_t uNewEip;
-    uint16_t uNewCs;
+    uint16_t uNewCS;
     if (enmEffOpSize == IEMMODE_32BIT)
     {
-        uNewCs  = pu16Frame[2];
+        uNewCS  = pu16Frame[2];
         uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
     }
     else
     {
-        uNewCs  = pu16Frame[1];
+        uNewCS  = pu16Frame[1];
         uNewEip = pu16Frame[0];
     }

@@ -979 +979 @@
         return rcStrict;
     pCtx->rip           = uNewEip;
-    pCtx->cs            = uNewCs;
-    pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
+    pCtx->cs            = uNewCS;
+    pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
     /** @todo do we load attribs and limit as well? */
     if (cbPop)

@@ -1134 +1134 @@

 /**
- * Implements iret.
+ * Implements iret for real mode and V8086 mode.
  *
  * @param   enmEffOpSize    The effective operand size.
  */
-IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
-{
-    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
+{
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+
+    /*
+     * iret throws an exception if VME isn't enabled.
+     */
+    if (   pCtx->eflags.Bits.u1VM
+        && !(pCtx->cr4 & X86_CR4_VME))
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+
+    /*
+     * Do the stack bits, but don't commit RSP before everything checks
+     * out right.
+     */
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     VBOXSTRICTRC rcStrict;
+    RTCPTRUNION  uFrame;
+    uint16_t     uNewCS;
+    uint32_t     uNewEip;
+    uint32_t     uNewFlags;
     uint64_t     uNewRsp;
-
-    /*
-     * Real mode is easy, V8086 mode is relative similar.
-     */
-    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
-        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
-    {
-        /* iret throws an exception if VME isn't enabled. */
-        if (   pCtx->eflags.Bits.u1VM
-            && !(pCtx->cr4 & X86_CR4_VME))
+    if (enmEffOpSize == IEMMODE_32BIT)
+    {
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip    = uFrame.pu32[0];
+        uNewCS     = (uint16_t)uFrame.pu32[1];
+        uNewFlags  = uFrame.pu32[2];
+        uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                   | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
+                   | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
+                   | X86_EFL_ID;
+        uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
+    }
+    else
+    {
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip    = uFrame.pu16[0];
+        uNewCS     = uFrame.pu16[1];
+        uNewFlags  = uFrame.pu16[2];
+        uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                   | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
+        uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
+        /** @todo The intel pseudo code does not indicate what happens to
+         *        reserved flags. We just ignore them. */
+    }
+    /** @todo Check how this is supposed to work if sp=0xfffe. */
+
+    /*
+     * Check the limit of the new EIP.
+     */
+    /** @todo Only the AMD pseudo code check the limit here, what's
+     *        right? */
+    if (uNewEip > pCtx->csHid.u32Limit)
+        return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
+
+    /*
+     * V8086 checks and flag adjustments
+     */
+    if (pCtx->eflags.Bits.u1VM)
+    {
+        if (pCtx->eflags.Bits.u2IOPL == 3)
+        {
+            /* Preserve IOPL and clear RF. */
+            uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
+            uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
+        }
+        else if (   enmEffOpSize == IEMMODE_16BIT
+                 && (   !(uNewFlags & X86_EFL_IF)
+                     || !pCtx->eflags.Bits.u1VIP )
+                 && !(uNewFlags & X86_EFL_TF)   )
+        {
+            /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
+            uNewFlags &= ~X86_EFL_VIF;
+            uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
+            uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
+            uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
+        }
+        else
             return iemRaiseGeneralProtectionFault0(pIemCpu);
-
-        /* Do the stack bits, but don't commit RSP before everything checks
-           out right. */
-        union
-        {
-            uint32_t const *pu32;
-            uint16_t const *pu16;
-            void const     *pv;
-        } uFrame;
+    }
+
+    /*
+     * Commit the operation.
+     */
+    rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+    pCtx->rip           = uNewEip;
+    pCtx->cs            = uNewCS;
+    pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
+    /** @todo do we load attribs and limit as well? */
+    Assert(uNewFlags & X86_EFL_1);
+    pCtx->eflags.u = uNewFlags;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements iret for protected mode
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
+{
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+
+    /*
+     * Nested task return.
+     */
+    if (pCtx->eflags.Bits.u1NT)
+    {
+        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
+    }
+    /*
+     * Normal return.
+     */
+    else
+    {
+        /*
+         * Do the stack bits, but don't commit RSP before everything checks
+         * out right.
+         */
         Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
-        uint16_t uNewCs;
-        uint32_t uNewEip;
-        uint32_t uNewFlags;
+        VBOXSTRICTRC rcStrict;
+        RTCPTRUNION  uFrame;
+        uint16_t     uNewCS;
+        uint32_t     uNewEip;
+        uint32_t     uNewFlags;
+        uint64_t     uNewRsp;
         if (enmEffOpSize == IEMMODE_32BIT)
         {
             rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
             if (rcStrict != VINF_SUCCESS)
                 return rcStrict;
             uNewEip   = uFrame.pu32[0];
-            uNewCs    = (uint16_t)uFrame.pu32[1];
+            uNewCS    = (uint16_t)uFrame.pu32[1];
             uNewFlags = uFrame.pu32[2];
-            uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
-                       | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
-                       | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
-                       | X86_EFL_ID;
-            uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
         }
         else
         {
             rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
             if (rcStrict != VINF_SUCCESS)
                 return rcStrict;
             uNewEip   = uFrame.pu16[0];
-            uNewCs    = uFrame.pu16[1];
+            uNewCS    = uFrame.pu16[1];
             uNewFlags = uFrame.pu16[2];
-            uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
-                       | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
-            uNewFlags |= pCtx->eflags.u & (UINT16_C(0xffff0000) | X86_EFL_1);
-            /** @todo The intel pseudo code does not indicate what happens to
-             *        reserved flags. We just ignore them. */
-        }
-        /** @todo Check how this is supposed to work if sp=0xfffe. */
-
-        /* Check the limit of the new EIP. */
-        /** @todo Only the AMD pseudo code check the limit here, what's
-         *        right? */
-        if (uNewEip > pCtx->csHid.u32Limit)
-            return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
-
-        /* V8086 checks and flag adjustments */
-        if (pCtx->eflags.Bits.u1VM)
-        {
-            if (pCtx->eflags.Bits.u2IOPL == 3)
-            {
-                /* Preserve IOPL and clear RF. */
-                uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
-                uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
-            }
-            else if (   enmEffOpSize == IEMMODE_16BIT
-                     && (   !(uNewFlags & X86_EFL_IF)
-                         || !pCtx->eflags.Bits.u1VIP )
-                     && !(uNewFlags & X86_EFL_TF)   )
-            {
-                /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
-                uNewFlags &= ~X86_EFL_VIF;
-                uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
-                uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
-                uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
-            }
-            else
-                return iemRaiseGeneralProtectionFault0(pIemCpu);
-        }
-
-        /* commit the operation. */
-        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
+        }
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-        pCtx->rip           = uNewEip;
-        pCtx->cs            = uNewCs;
-        pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
-        /** @todo do we load attribs and limit as well? */
-        Assert(uNewFlags & X86_EFL_1);
-        pCtx->eflags.u = uNewFlags;
-
-        return VINF_SUCCESS;
-    }
-
-
-    AssertFailed();
+
+        /*
+         * What are we returning to?
+         */
+        if (   (uNewFlags & X86_EFL_VM)
+            && pIemCpu->uCpl == 0)
+        {
+            /* V8086 mode! */
+            AssertFailedReturn(VERR_NOT_IMPLEMENTED);
+        }
+        else
+        {
+            /*
+             * Protected mode.
+             */
+            /* Read the CS descriptor. */
+            if (!(uNewCS & (X86_SEL_MASK | X86_SEL_LDT)))
+            {
+                Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCS, uNewEip));
+                return iemRaiseGeneralProtectionFault0(pIemCpu);
+            }
+
+            IEMSELDESC DescCS;
+            rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+
+            /* Must be a code descriptor. */
+            if (!DescCS.Legacy.Gen.u1DescType)
+            {
+                Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
+            {
+                Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+
+            /* Privilege checks. */
+            if ((uNewCS & X86_SEL_RPL) < pIemCpu->uCpl)
+            {
+                Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCS, uNewEip, pIemCpu->uCpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+            if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+                && (uNewCS & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
+            {
+                Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u2Dpl));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+            }
+
+            /* Present? */
+            if (!DescCS.Legacy.Gen.u1Present)
+            {
+                Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCS, uNewEip));
+                return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
+            }
+
+            uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
+            if (DescCS.Legacy.Gen.u1Granularity)
+                cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+            /*
+             * Different level?
+             */
+            if ((uNewCS & X86_SEL_RPL) != pIemCpu->uCpl)
+            {
+                AssertFailedReturn(VERR_NOT_IMPLEMENTED);
+            }
+            /*
+             * Same level.
+             */
+            else
+            {
+                /* Check EIP. */
+                if (uNewEip > cbLimitCS)
+                {
+                    Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewEip, cbLimitCS));
+                    return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
+                }
+
+                /*
+                 * Commit the changes, marking CS first since it may fail.
+                 */
+                if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+                {
+                    rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+                    if (rcStrict != VINF_SUCCESS)
+                        return rcStrict;
+                    DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+                }
+
+                pCtx->rip            = uNewEip;
+                pCtx->cs             = uNewCS;
+                pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+                pCtx->csHid.u32Limit = cbLimitCS;
+                pCtx->csHid.u64Base  = X86DESC_BASE(DescCS.Legacy);
+                pCtx->rsp            = uNewRsp;
+
+                uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                                     | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+                if (enmEffOpSize != IEMMODE_16BIT)
+                    fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+                if (pIemCpu->uCpl == 0)
+                    fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+                else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+                    fEFlagsMask |= X86_EFL_IF;
+                pCtx->eflags.u &= ~fEFlagsMask;
+                pCtx->eflags.u |= fEFlagsMask & uNewFlags;
+                /* Done! */
+            }
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements iret for long mode
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
+{
+    //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+    //VBOXSTRICTRC rcStrict;
+    //uint64_t uNewRsp;
+
     return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Implements iret.
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
+{
+    /*
+     * Call a mode specific worker.
+     */
+    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
+        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
+        return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
+    if (IEM_IS_LONG_MODE(pIemCpu))
+        return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
+
+    return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
 }

@@ -2449 +2650 @@
     pCtx->rax = (uint32_t)uTicks;
     pCtx->rdx = uTicks >> 32;
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fIgnoreRaxRdx = true;
+#endif

     iemRegAddToRip(pIemCpu, cbInstr);

@@ -2736 +2940 @@
     pCtx->fpu.FCW   = 0x37f;
     pCtx->fpu.FSW   = 0;
-    pCtx->fpu.FTW   = 0xff;
+    pCtx->fpu.FTW   = 0x00;  /* 0 - empty. */
     pCtx->fpu.FPUDP = 0;
     pCtx->fpu.DS    = 0; //??

@@ -2748 +2952 @@
     pFpu->FCW   = 0x37f;
     pFpu->FSW   = 0;
-    pFpu->FTW   = 0xffff;
+    pFpu->FTW   = 0xffff; /* 11 - empty */
     pFpu->FPUOO = 0; //??
     pFpu->FPUOS = 0; //??
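The same-level protected-mode iret path above merges the popped EFLAGS into the live value through a privilege-dependent mask: pCtx->eflags.u &= ~fEFlagsMask; pCtx->eflags.u |= fEFlagsMask & uNewFlags;. A small standalone sketch of just that merge step, with made-up flag constants standing in for the X86_EFL_* defines:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up subset of flag bits; the real code uses the X86_EFL_* defines. */
    #define SK_EFL_CF 0x0001u
    #define SK_EFL_ZF 0x0040u
    #define SK_EFL_IF 0x0200u

    static uint32_t MergeFlags(uint32_t fCur, uint32_t fPopped, uint32_t fMask)
    {
        fCur &= ~fMask;          /* clear the writable bits ... */
        fCur |= fPopped & fMask; /* ... and take them from the popped value */
        return fCur;
    }

    int main(void)
    {
        uint32_t fCur    = SK_EFL_IF;              /* interrupts currently enabled */
        uint32_t fPopped = SK_EFL_CF | SK_EFL_ZF;  /* frame clears IF, sets CF+ZF */
        uint32_t fMask   = SK_EFL_CF | SK_EFL_ZF;  /* CPL > IOPL: IF not writable */

        printf("merged eflags: %#06x\n", (unsigned)MergeFlags(fCur, fPopped, fMask));
        /* Prints 0x0241: CF and ZF come from the frame, IF keeps its old value. */
        return 0;
    }

Bits outside the mask keep their previous value, which is how the CPL/IOPL tests above decide what an iret at a given privilege level may actually change.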
trunk/src/VBox/VMM/include/IEMInternal.h (r37034 → r37079)

@@ -172 +172 @@
     * This is used to skip past really slow bits.  */
    bool                    fNoRem;
-   bool                    afAlignment1[3];
+   /** Indicates that RAX and RDX differences should be ignored since RDTSC
+    *  and RDTSCP are timing sensitive. */
+   bool                    fIgnoreRaxRdx;
+   bool                    afAlignment1[2];
    /** Mask of undefined eflags.
     * The verifier will any difference in these flags. */

@@ -316 +319 @@
 /** Stack read alias. */
 #define IEM_ACCESS_STACK_R      (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_STACK)
+/** Stack read+write alias. */
+#define IEM_ACCESS_STACK_RW     (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
 /** @} */
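The new IEM_ACCESS_STACK_RW alias follows the scheme the existing IEM_ACCESS_* values already use: an access is the OR of independent type bits and a "what" bit, so either axis can be tested in isolation. A sketch with invented bit values (the real layout lives in IEMInternal.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented bit layout; only the composition scheme mirrors the header. */
    #define SK_ACCESS_TYPE_READ   UINT32_C(0x00000001)
    #define SK_ACCESS_TYPE_WRITE  UINT32_C(0x00000002)
    #define SK_ACCESS_WHAT_STACK  UINT32_C(0x00000100)

    #define SK_ACCESS_STACK_R   (SK_ACCESS_TYPE_READ  | SK_ACCESS_WHAT_STACK)
    #define SK_ACCESS_STACK_W   (SK_ACCESS_TYPE_WRITE | SK_ACCESS_WHAT_STACK)
    #define SK_ACCESS_STACK_RW  (SK_ACCESS_TYPE_READ | SK_ACCESS_TYPE_WRITE | SK_ACCESS_WHAT_STACK)

    int main(void)
    {
        uint32_t fAccess = SK_ACCESS_STACK_RW;

        if (fAccess & SK_ACCESS_TYPE_WRITE)   /* type axis: is it a write? */
            printf("write access\n");
        if (fAccess & SK_ACCESS_WHAT_STACK)   /* what axis: does it hit the stack? */
            printf("targets the stack\n");
        return 0;
    }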