Changeset 42437 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp:
    Jul 27, 2012 2:51:48 PM
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    3 edited
Legend:
    unchanged lines are unprefixed
    + added
    - removed
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r42427)
+++ trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r42437)

@@ -192 +192 @@
 
 /**
+ * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+    do { \
+        Log(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
+        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+    } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
+ * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
+ * occasion using the supplied logger statement.
+ *
+ * @param   a_LoggerArgs    What to log on failure.
+ */
+#ifdef LOG_ENABLED
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+    do { \
+        LogFunc(a_LoggerArgs); \
+        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
+    } while (0)
+#else
+# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
+#endif
+
+/**
  * Call an opcode decoder function.

@@ -1944 +1976 @@
     /* Check that there is sufficient space for the stack frame. */
     uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
-    AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    if (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN)
+    {
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
+    }
 
     uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;

@@ -2093 +2128 @@
 {
     NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
-    AssertMsgFailed(("V8086 exception / interrupt dispatching\n"));
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    /** @todo implement me. */
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
 }

@@ -2121 +2156 @@
 {
     NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
-    AssertMsgFailed(("long mode exception / interrupt dispatching\n"));
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    /** @todo implement me. */
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("long mode exception / interrupt dispatching\n"));
 }

@@ -2164 +2199 @@
 
     /** @todo double and triple faults. */
-    AssertReturn(pIemCpu->cXcptRecursions < 3, VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    if (pIemCpu->cXcptRecursions >= 3)
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
 
     /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.

@@ -2600 +2636 @@
 }
 
+/**
+ * Complains about a stub.
+ *
+ * Providing two versions of this macro, one for daily use and one for use when
+ * working on IEM.
+ */
+#if 0
+# define IEMOP_BITCH_ABOUT_STUB() \
+    do { \
+        RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
+        iemOpStubMsg2(pIemCpu); \
+        RTAssertPanic(); \
+    } while (0)
+#else
+# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
+#endif
 
 /** Stubs an opcode.
  */

@@ -2605 +2657 @@
 FNIEMOP_DEF(a_Name) \
 { \
-    RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
-    iemOpStubMsg2(pIemCpu); \
-    RTAssertPanic(); \
+    IEMOP_BITCH_ABOUT_STUB(); \
     return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
 } \

@@ -2616 +2666 @@
 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
 { \
-    RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
-    iemOpStubMsg2(pIemCpu); \
-    RTAssertPanic(); \
+    IEMOP_BITCH_ABOUT_STUB(); \
     NOREF(a_Name0); \
     return VERR_IEM_INSTR_NOT_IMPLEMENTED; \

@@ -4310 +4358 @@
     {
         /** @todo implement expand down segments. */
-        AssertFailed(/** @todo implement this */);
-        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n"));
     }
 }

@@ -7843 +7890 @@
     }
 
+    if (rcStrict != VINF_SUCCESS)
+    {
+        if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
+            pIemCpu->cRetAspectNotImplemented++;
+        else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
+            pIemCpu->cRetInstrNotImplemented++;
+        else if (RT_SUCCESS(rcStrict))
+            pIemCpu->cRetInfStatuses++;
+        else
+            pIemCpu->cRetErrStatuses++;
+    }
+
     return rcStrict;
 }
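Both IEM_RETURN_ASPECT_NOT_IMPLEMENTED macros above rely on the do { ... } while (0) idiom so that the multi-statement debug body still behaves as a single statement, for instance under a braceless if, while release builds collapse to a bare return. Here is a minimal, self-contained sketch of that pattern; the DEMO_* names and the printf() stand-in for VBox's Log() are invented for illustration and are not part of the VirtualBox sources.

    #include <stdio.h>

    #define DEMO_ERR_NOT_IMPLEMENTED (-101)

    /* Debug builds log where emulation gave up; release builds just return. */
    #ifdef DEMO_LOG_ENABLED
    # define DEMO_RETURN_NOT_IMPLEMENTED() \
        do { \
            printf("%s: returning DEMO_ERR_NOT_IMPLEMENTED (line %d)\n", __func__, __LINE__); \
            return DEMO_ERR_NOT_IMPLEMENTED; \
        } while (0)
    #else
    # define DEMO_RETURN_NOT_IMPLEMENTED() \
        return DEMO_ERR_NOT_IMPLEMENTED
    #endif

    static int emulateAspect(int fSupported)
    {
        if (!fSupported)
            DEMO_RETURN_NOT_IMPLEMENTED(); /* expands safely without braces */
        return 0;
    }

    int main(void)
    {
        printf("rc=%d\n", emulateAspect(0));
        return 0;
    }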
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r42432)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r42437)

@@ -40 +40 @@
     {
         NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
     }
     return VINF_SUCCESS;

@@ -778 +778 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }

@@ -794 +794 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }

@@ -810 +810 @@
 {
     /* Call various functions to do the work. */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }

@@ -1899 +1899 @@
 
 /**
+ * Implements iret for protected mode returning to V8086 mode.
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   uNewEip         The new EIP.
+ * @param   uNewCs          The new CS.
+ * @param   uNewFlags       The new EFLAGS.
+ * @param   uNewRsp         The RSP after the initial IRET frame.
+ */
+IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, IEMMODE, enmEffOpSize, uint32_t, uNewEip, uint16_t, uNewCs,
+                uint32_t, uNewFlags, uint64_t, uNewRsp)
+{
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
+}
+
+
+/**
+ * Implements iret for protected mode returning via a nested task.
+ *
+ * @param   enmEffOpSize    The effective operand size.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
+{
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
+}
+
+
+/**
  * Implements iret for protected mode
  *

@@ -1908 +1935 @@
     NOREF(cbInstr);
 
-    Log(("iemCImpl_iret_prot: rip=%#llx ds=%#x es=%#x\n", pCtx->rip, pCtx->ds.Sel, pCtx->es.Sel));
-
     /*
      * Nested task return.
      */
     if (pCtx->eflags.Bits.u1NT)
-    {
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-    }
+        return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
+
     /*
      * Normal return.
+     *
+     * Do the stack bits, but don't commit RSP before everything checks
+     * out right.
      */
-    else
-    {
-        /*
-         * Do the stack bits, but don't commit RSP before everything checks
-         * out right.
-         */
-        Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
… …
-        /*
-         * What are we returning to?
-         */
-        if (   (uNewFlags & X86_EFL_VM)
-            && pIemCpu->uCpl == 0)
-        {
-            /* V8086 mode! */
-            AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-        }
-        else
-        {
-            /*
-             * Protected mode.
-             */
… …
-        }
-    }
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+    VBOXSTRICTRC rcStrict;
+    RTCPTRUNION uFrame;
+    uint16_t uNewCs;
+    uint32_t uNewEip;
+    uint32_t uNewFlags;
+    uint64_t uNewRsp;
+    if (enmEffOpSize == IEMMODE_32BIT)
+    {
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip = uFrame.pu32[0];
+        uNewCs = (uint16_t)uFrame.pu32[1];
+        uNewFlags = uFrame.pu32[2];
+    }
+    else
+    {
+        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uNewEip = uFrame.pu16[0];
+        uNewCs = uFrame.pu16[1];
+        uNewFlags = uFrame.pu16[2];
+    }
+    rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /*
+     * We're hopefully not returning to V8086 mode...
+     */
+    if (   (uNewFlags & X86_EFL_VM)
+        && pIemCpu->uCpl == 0)
+        return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, enmEffOpSize, uNewEip, uNewCs, uNewFlags, uNewRsp);
+
+    /*
+     * Protected mode.
+     */
+    /* Read the CS descriptor. */
+    if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
+    {
+        Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+    }
+
+    IEMSELDESC DescCS;
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
+    if (rcStrict != VINF_SUCCESS)
+    {
+        Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    /* Must be a code descriptor. */
+    if (!DescCS.Legacy.Gen.u1DescType)
+    {
+        Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
+    {
+        Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* Privilege checks. */
+    if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
+    {
+        Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+    if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+        && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
+    {
+        Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* Present? */
+    if (!DescCS.Legacy.Gen.u1Present)
+    {
+        Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
+    }
+
+    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
+
+    /*
+     * Return to outer level?
+     */
+    if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
+    {
+        uint16_t uNewSS;
+        uint32_t uNewESP;
+        if (enmEffOpSize == IEMMODE_32BIT)
+        {
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            uNewESP = uFrame.pu32[0];
+            uNewSS = (uint16_t)uFrame.pu32[1];
+        }
+        else
+        {
+            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            uNewESP = uFrame.pu16[0];
+            uNewSS = uFrame.pu16[1];
+        }
+        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+
+        /* Read the SS descriptor. */
+        if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFault0(pIemCpu);
+        }
+
+        IEMSELDESC DescSS;
+        rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
+        if (rcStrict != VINF_SUCCESS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
+            return rcStrict;
+        }
+
+        /* Privilege checks. */
+        if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Must be a writeable data segment descriptor. */
+        if (!DescSS.Legacy.Gen.u1DescType)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+        if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
+        {
+            Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
+        }
+
+        /* Present? */
+        if (!DescSS.Legacy.Gen.u1Present)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
+            return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
+        }
+
+        uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
+
+        /* Check EIP. */
+        if (uNewEip > cbLimitCS)
+        {
+            Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
+                 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
+        }
+
+        /*
+         * Commit the changes, marking CS and SS accessed first since
+         * that may fail.
+         */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        pCtx->rip = uNewEip;
+        pCtx->cs.Sel = uNewCs;
+        pCtx->cs.ValidSel = uNewCs;
+        pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
+        pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pCtx->cs.u32Limit = cbLimitCS;
+        pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
+        pCtx->rsp = uNewESP;
+        pCtx->ss.Sel = uNewSS;
+        pCtx->ss.ValidSel = uNewSS;
+        pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
+        pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
+        pCtx->ss.u32Limit = cbLimitSs;
+        pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
+
+        uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                             | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+        if (enmEffOpSize != IEMMODE_16BIT)
+            fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+        if (pIemCpu->uCpl == 0)
+            fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+        else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+            fEFlagsMask |= X86_EFL_IF;
+        pCtx->eflags.u &= ~fEFlagsMask;
+        pCtx->eflags.u |= fEFlagsMask & uNewFlags;
+
+        pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
+        iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
+
+        /* Done! */
+    }
+    /*
+     * Return to the same level.
+     */
+    else
+    {
+        /* Check EIP. */
+        if (uNewEip > cbLimitCS)
+        {
+            Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
+        }
+
+        /*
+         * Commit the changes, marking CS first since it may fail.
+         */
+        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+        }
+
+        pCtx->rip = uNewEip;
+        pCtx->cs.Sel = uNewCs;
+        pCtx->cs.ValidSel = uNewCs;
+        pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
+        pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
+        pCtx->cs.u32Limit = cbLimitCS;
+        pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
+        pCtx->rsp = uNewRsp;
+
+        uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
+                             | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
+        if (enmEffOpSize != IEMMODE_16BIT)
+            fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
+        if (pIemCpu->uCpl == 0)
+            fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
+        else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
+            fEFlagsMask |= X86_EFL_IF;
+        pCtx->eflags.u &= ~fEFlagsMask;
+        pCtx->eflags.u |= fEFlagsMask & uNewFlags;
+        /* Done! */
+    }
     return VINF_SUCCESS;
 }

@@ -2222 +2236 @@
 
     NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 }

@@ -2894 +2908 @@
         case 8:
             if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-                AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
+                IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
             else
                 crX = 0xff;

@@ -3177 +3191 @@
         case 8:
             if (!IEM_VERIFICATION_ENABLED(pIemCpu))
-                AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
+                IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement CR8/TPR read\n")); /** @todo implement CR8 reading and writing. */
             else
                 rcStrict = VINF_SUCCESS;

@@ -3584 +3598 @@
     {
         /** @todo I/O port permission bitmap check */
-        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap checks.\n"));
     }
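The EFLAGS handling that appears in both return paths of iemCImpl_iret_prot reduces to a single rule: which bits IRET may restore from the popped flags image depends on the effective operand size and on the current privilege level relative to IOPL. The sketch below restates that computation as a pure function; the bit values follow the x86 architecture manuals, and the helper name iretMergeEflags is invented here for illustration.

    #include <stdint.h>

    /* x86 EFLAGS bits, values per the Intel/AMD manuals. */
    #define X86_EFL_CF    (1u << 0)
    #define X86_EFL_PF    (1u << 2)
    #define X86_EFL_AF    (1u << 4)
    #define X86_EFL_ZF    (1u << 6)
    #define X86_EFL_SF    (1u << 7)
    #define X86_EFL_TF    (1u << 8)
    #define X86_EFL_IF    (1u << 9)
    #define X86_EFL_DF    (1u << 10)
    #define X86_EFL_OF    (1u << 11)
    #define X86_EFL_IOPL  (3u << 12)
    #define X86_EFL_NT    (1u << 14)
    #define X86_EFL_RF    (1u << 16)
    #define X86_EFL_AC    (1u << 18)
    #define X86_EFL_VIF   (1u << 19)
    #define X86_EFL_VIP   (1u << 20)
    #define X86_EFL_ID    (1u << 21)

    /* Merge the popped flags into the live EFLAGS the way the iret code
       above does: uCpl is the current privilege level, uIopl the IOPL
       field of the live EFLAGS, f16BitOp non-zero for a 16-bit IRET. */
    static uint32_t iretMergeEflags(uint32_t fCur, uint32_t fPopped,
                                    uint8_t uCpl, uint8_t uIopl, int f16BitOp)
    {
        uint32_t fMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                       | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
        if (!f16BitOp)          /* 32-bit operand size may also restore these. */
            fMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
        if (uCpl == 0)          /* ring 0 may change IF, IOPL, VIF and VIP. */
            fMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP;
        else if (uCpl <= uIopl) /* sufficiently privileged: IF only. */
            fMask |= X86_EFL_IF;
        return (fCur & ~fMask) | (fPopped & fMask);
    }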
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r42024)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r42437)

@@ -625 +625 @@
 {
     NOREF(pIemCpu); NOREF(bRm);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -665 +666 @@
 {
     NOREF(pIemCpu); NOREF(bRm);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -673 +675 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -681 +684 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -862 +866 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -870 +875 @@
 {
     NOREF(pIemCpu);
-    AssertFailedReturn(VERR_IEM_INSTR_NOT_IMPLEMENTED);
+    IEMOP_BITCH_ABOUT_STUB();
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 }

@@ -973 +979 @@
 /** Opcode 0x0f 0x08. */
 FNIEMOP_STUB(iemOp_invd);
+
+
 /** Opcode 0x0f 0x09. */
-FNIEMOP_STUB(iemOp_wbinvd);
+FNIEMOP_DEF(iemOp_wbinvd)
+{
+    IEMOP_MNEMONIC("wbinvd");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    IEM_MC_BEGIN(0, 0);
+    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
+    IEM_MC_ADVANCE_RIP();
+    IEM_MC_END();
+    return VINF_SUCCESS; /* ignore for now */
+}
+
+
 /** Opcode 0x0f 0x0b. */
 FNIEMOP_STUB(iemOp_ud2);
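The new iemOp_wbinvd body treats WBINVD as a privileged no-op: the only architectural side effect it emulates is the CPL check, raising #GP(0) outside ring 0 and otherwise just advancing RIP, per the "ignore for now" comment. A rough stand-alone model of that decision follows; the DEMOCPU structure and the function names are invented for illustration.

    #include <stdint.h>

    typedef struct DEMOCPU
    {
        uint8_t  uCpl; /* current privilege level, 0..3 */
        uint64_t rip;  /* guest instruction pointer */
    } DEMOCPU;

    enum { DEMO_OK = 0, DEMO_RAISED_GP0 = 1 };

    /* Privileged no-op: check CPL, skip the actual cache write-back and
       invalidate, and move past the 2-byte 0F 09 encoding. */
    static int demoWbinvd(DEMOCPU *pCpu)
    {
        if (pCpu->uCpl != 0)
            return DEMO_RAISED_GP0; /* would raise #GP(0) in the guest */
        pCpu->rip += 2;
        return DEMO_OK;
    }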