Changeset 40182 in vbox
- Timestamp: Feb 20, 2012 3:22:46 PM
- svn:sync-xref-src-repo-rev: 76346
- Location: trunk
- Files: 5 edited
trunk/include/iprt/x86.h
(diff from r40170 to r40182)

@@ -2371 +2371 @@
     /** Limit - Low word - *IGNORED*. */
     unsigned    u16LimitLow : 16;
-    /** Base address - low eword. - *IGNORED*
+    /** Base address - low word. - *IGNORED*
     * Don't try set this to 24 because MSC is doing stupid things then. */
     unsigned    u16BaseLow : 16;
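For context, the fixed comment sits in the generic descriptor bitfield struct. A sketch of the surrounding layout follows; the fields beyond the two quoted above follow the usual x86 descriptor format and are an assumption about x86.h, not a quote. The base address is deliberately split into 16+8+8 bit pieces; merging the low 24 bits into one bitfield is what the MSC warning is about, presumably because MSC packs bitfields by the declared type and pads a 24-bit member differently than GCC.

    typedef struct X86DESCGENERIC   /* sketch, assumed layout */
    {
        unsigned    u16LimitLow : 16;   /* limit bits 0..15  */
        unsigned    u16BaseLow  : 16;   /* base  bits 0..15, keep at 16! */
        unsigned    u8BaseHigh1 : 8;    /* base  bits 16..23 */
        unsigned    u4Type      : 4;    /* segment type      */
        /* ... u1DescType, u2Dpl, u1Present, limit bits 16..19,
           AVL/L/D/G flags, base bits 24..31 ... */
    } X86DESCGENERIC;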
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(diff from r40174 to r40182)

@@ -5265 +5265 @@
 
 /**
- * Continue a special stack pop (used by iret).
+ * Continue a special stack pop (used by iret and retf).
  *
  * This will raise \#SS or \#PF if appropriate.
@@ -5439 +5439 @@
     }
     return rcStrict;
+}
+
+
+/**
+ * Fakes a long mode stack selector for SS = 0.
+ *
+ * @param   pDescSs     Where to return the fake stack descriptor.
+ * @param   uDpl        The DPL we want.
+ */
+static void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
+{
+    pDescSs->Long.au64[0]        = 0;
+    pDescSs->Long.au64[1]        = 0;
+    pDescSs->Long.Gen.u4Type     = X86_SEL_TYPE_RW_ACC;
+    pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
+    pDescSs->Long.Gen.u2Dpl      = uDpl;
+    pDescSs->Long.Gen.u1Present  = 1;
+    pDescSs->Long.Gen.u1Long     = 1;
 }
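The new iemMemFakeStackSelDesc helper supports the retf rework below: in long mode SS may legally be a null selector outside ring 3, and the CPU then behaves as if a flat, writable, present stack segment were loaded. The snippet below is condensed from the retf hunk in IEMAllCImpl.cpp.h later in this changeset (the early return stands in for the logged #GP path there):

    /* A null SS popped from the stack is only acceptable in long mode
       for ring 0..2; fake a descriptor with the selector's RPL as DPL
       so the common stack-segment checks can proceed. */
    IEMSELDESC DescSs;
    if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
    {
        if (   !DescCs.Legacy.Gen.u1Long
            || (uNewOuterSs & X86_SEL_RPL) == 3)
            return iemRaiseGeneralProtectionFault0(pIemCpu); /* logged in the real code */
        iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
    }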
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(diff from r40175 to r40182)

@@ -108 +108 @@ iemHlpLoadNullDataSelectorProt
 static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
 {
-    /** @todo write a testcase checking what happends when loading a NULL data
-     * selector in protected mode. */
+    /** @todo Testcase: write a testcase checking what happends when loading a NULL
+     * data selector in protected mode. */
     pHid->u64Base   = 0;
     pHid->u32Limit  = 0;
@@ -761 +761 @@
 
 /**
+ * Implements far jumps and calls thru task segments (TSS).
+ *
+ * @param   uSel            The selector.
+ * @param   enmBranch       The kind of branching we're performing.
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   pDesc           The descriptor corrsponding to @a uSel. The type is
+ *                          call gate.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
+{
+    /* Call various functions to do the work. */
+    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * Implements far jumps and calls thru task gates.
+ *
+ * @param   uSel            The selector.
+ * @param   enmBranch       The kind of branching we're performing.
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   pDesc           The descriptor corrsponding to @a uSel. The type is
+ *                          call gate.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
+{
+    /* Call various functions to do the work. */
+    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * Implements far jumps and calls thru call gates.
+ *
+ * @param   uSel            The selector.
+ * @param   enmBranch       The kind of branching we're performing.
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   pDesc           The descriptor corrsponding to @a uSel. The type is
+ *                          call gate.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
+{
+    /* Call various functions to do the work. */
+    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+}
+
+
+/**
+ * Implements far jumps and calls thru system selectors.
+ *
+ * @param   uSel            The selector.
+ * @param   enmBranch       The kind of branching we're performing.
+ * @param   enmEffOpSize    The effective operand size.
+ * @param   pDesc           The descriptor corrsponding to @a uSel.
+ */
+IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
+{
+    Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
+    Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
+
+    if (IEM_IS_LONG_MODE(pIemCpu))
+        switch (pDesc->Legacy.Gen.u4Type)
+        {
+            case AMD64_SEL_TYPE_SYS_CALL_GATE:
+                return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
+
+            default:
+            case AMD64_SEL_TYPE_SYS_LDT:
+            case AMD64_SEL_TYPE_SYS_TSS_BUSY:
+            case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
+            case AMD64_SEL_TYPE_SYS_TRAP_GATE:
+            case AMD64_SEL_TYPE_SYS_INT_GATE:
+                Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+
+    switch (pDesc->Legacy.Gen.u4Type)
+    {
+        case X86_SEL_TYPE_SYS_286_CALL_GATE:
+        case X86_SEL_TYPE_SYS_386_CALL_GATE:
+            return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
+
+        case X86_SEL_TYPE_SYS_TASK_GATE:
+            return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
+
+        case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
+        case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
+            return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
+
+        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
+            Log(("branch %04x -> busy 286 TSS\n", uSel));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+
+        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
+            Log(("branch %04x -> busy 386 TSS\n", uSel));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+
+        default:
+        case X86_SEL_TYPE_SYS_LDT:
+        case X86_SEL_TYPE_SYS_286_INT_GATE:
+        case X86_SEL_TYPE_SYS_286_TRAP_GATE:
+        case X86_SEL_TYPE_SYS_386_INT_GATE:
+        case X86_SEL_TYPE_SYS_386_TRAP_GATE:
+            Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+}
+
+
+/**
  * Implements far jumps.
  *
@@ -771 +882 @@ iemCImpl_FarJmp
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
     NOREF(cbInstr);
+    Assert(offSeg <= UINT32_MAX);
 
     /*
@@ -802 +914 @@
     if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
     {
-        Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
+        Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
         return iemRaiseGeneralProtectionFault0(pIemCpu);
     }
@@ -815 +927 @@
     if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
     {
-        Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
+        Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
         return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
     }
 
     /*
-     * Deal with it according to its type.
-     */
-    if (Desc.Legacy.Gen.u1DescType)
-    {
-        /* Only code segments. */
-        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
-        {
-            Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
+     * Deal with it according to its type.  We do the standard code selectors
+     * here and dispatch the system selectors to worker functions.
+     */
+    if (!Desc.Legacy.Gen.u1DescType)
+        return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
+
+    /* Only code segments. */
+    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
+    {
+        Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+
+    /* L vs D. */
+    if (   Desc.Legacy.Gen.u1Long
+        && Desc.Legacy.Gen.u1DefBig
+        && IEM_IS_LONG_MODE(pIemCpu))
+    {
+        Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+
+    /* DPL/RPL/CPL check, where conforming segments makes a difference. */
+    if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+    {
+        if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
+        {
+            Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
+                 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-
-        /* L vs D. */
-        if (   Desc.Legacy.Gen.u1Long
-            && Desc.Legacy.Gen.u1DefBig
-            && IEM_IS_LONG_MODE(pIemCpu))
-        {
-            Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
+    }
+    else
+    {
+        if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
+        {
+            Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-
-        /* DPL/RPL/CPL check, where conforming segments makes a difference. */
-        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
-        {
-            if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
-            {
-                Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
-                     uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-            }
-        }
-        else
-        {
-            if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
-            {
-                Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-            }
-            if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
-            {
-                Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-            }
-        }
-
-        /* Limit check. (Should alternatively check for non-canonical addresses
-           here, but that is ruled out by offSeg being 32-bit, right?) */
-        uint64_t u64Base;
-        uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-        if (Desc.Legacy.Gen.u1Granularity)
-            cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-        if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
-            u64Base = 0;
-        else
-        {
-            if (offSeg > cbLimit)
-            {
-                Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-            }
-            u64Base = X86DESC_BASE(Desc.Legacy);
-        }
-
-        /*
-         * Ok, everything checked out fine.  Now set the accessed bit before
-         * committing the result into CS, CSHID and RIP.
-         */
-        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
-        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
+        if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
+        {
+            Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+    }
+
+    /* Chop the high bits if 16-bit (Intel says so). */
+    if (enmEffOpSize == IEMMODE_16BIT)
+        offSeg &= UINT16_MAX;
+
+    /* Limit check. (Should alternatively check for non-canonical addresses
+       here, but that is ruled out by offSeg being 32-bit, right?) */
+    uint64_t u64Base;
+    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
+    if (Desc.Legacy.Gen.u1Granularity)
+        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+        u64Base = 0;
+    else
+    {
+        if (offSeg > cbLimit)
+        {
+            Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+        u64Base = X86DESC_BASE(Desc.Legacy);
+    }
+
+    /*
+     * Ok, everything checked out fine.  Now set the accessed bit before
+     * committing the result into CS, CSHID and RIP.
+     */
+    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+    {
+        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
 #ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 #endif
-        }
-
-        /* commit */
-        pCtx->rip = offSeg;
-        pCtx->cs  = uSel & (X86_SEL_MASK | X86_SEL_LDT);
-        pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
-        pCtx->csHid.Attr.u   = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
-        pCtx->csHid.u32Limit = cbLimit;
-        pCtx->csHid.u64Base  = u64Base;
-        /** @todo check if the hidden bits are loaded correctly for 64-bit
-         * mode. */
-        return VINF_SUCCESS;
-    }
-
-    /*
-     * System selector.
-     */
-    if (IEM_IS_LONG_MODE(pIemCpu))
-        switch (Desc.Legacy.Gen.u4Type)
-        {
-            case AMD64_SEL_TYPE_SYS_LDT:
-            case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
-            case AMD64_SEL_TYPE_SYS_TSS_BUSY:
-            case AMD64_SEL_TYPE_SYS_CALL_GATE:
-            case AMD64_SEL_TYPE_SYS_INT_GATE:
-            case AMD64_SEL_TYPE_SYS_TRAP_GATE:
-                /* Call various functions to do the work. */
-                AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-            default:
-                Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
-                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-
-        }
-    switch (Desc.Legacy.Gen.u4Type)
-    {
-        case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
-        case X86_SEL_TYPE_SYS_LDT:
-        case X86_SEL_TYPE_SYS_286_CALL_GATE:
-        case X86_SEL_TYPE_SYS_TASK_GATE:
-        case X86_SEL_TYPE_SYS_286_INT_GATE:
-        case X86_SEL_TYPE_SYS_286_TRAP_GATE:
-        case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
-        case X86_SEL_TYPE_SYS_386_CALL_GATE:
-        case X86_SEL_TYPE_SYS_386_INT_GATE:
-        case X86_SEL_TYPE_SYS_386_TRAP_GATE:
-            /* Call various functions to do the work. */
-            AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-
-        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
-        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
-            /* Call various functions to do the work. */
-            AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
-
-        default:
-            Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
-            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-    }
+    }
+
+    /* commit */
+    pCtx->rip = offSeg;
+    pCtx->cs  = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
+    pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->csHid.u32Limit = cbLimit;
+    pCtx->csHid.u64Base  = u64Base;
+    /** @todo check if the hidden bits are loaded correctly for 64-bit
+     * mode. */
+    return VINF_SUCCESS;
 }
@@ -959 +1032 @@ iemCImpl_callf
  * This very similar to iemCImpl_FarJmp.
  *
- * @param   uSel        The selector.
- * @param   offSeg      The segment offset.
- * @param   enmOpSize   The operand size (in case we need it).
- */
-IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
+ * @param   uSel            The selector.
+ * @param   offSeg          The segment offset.
+ * @param   enmEffOpSize    The operand size (in case we need it).
+ */
+IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
 {
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
@@ -978 +1051 @@
         && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
     {
-        Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
+        Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
 
         /* Check stack first - may #SS(0). */
-        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
+        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
                                                &pvRet, &uNewRsp);
         if (rcStrict != VINF_SUCCESS)
@@ -991 +1064 @@
 
         /* Everything is fine, push the return address. */
-        if (enmOpSize == IEMMODE_16BIT)
+        if (enmEffOpSize == IEMMODE_16BIT)
         {
             ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
@@ -1016 +1089 @@
         return VINF_SUCCESS;
     }
-    /** @todo implement next (pci bios call). */
-    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
+
+    /*
+     * Protected mode. Need to parse the specified descriptor...
+     */
+    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
+    {
+        Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+    }
+
+    /* Fetch the descriptor. */
+    IEMSELDESC Desc;
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /*
+     * Deal with it according to its type.  We do the standard code selectors
+     * here and dispatch the system selectors to worker functions.
+     */
+    if (!Desc.Legacy.Gen.u1DescType)
+        return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
+
+    /* Only code segments. */
+    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
+    {
+        Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+
+    /* L vs D. */
+    if (   Desc.Legacy.Gen.u1Long
+        && Desc.Legacy.Gen.u1DefBig
+        && IEM_IS_LONG_MODE(pIemCpu))
+    {
+        Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+    }
+
+    /* DPL/RPL/CPL check, where conforming segments makes a difference. */
+    if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+    {
+        if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
+        {
+            Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
+                 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+    }
+    else
+    {
+        if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
+        {
+            Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+        if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
+        {
+            Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+    }
+
+    /* Is it there? */
+    if (!Desc.Legacy.Gen.u1Present)
+    {
+        Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
+    }
+
+    /* Check stack first - may #SS(0). */
+    rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
+                                           enmEffOpSize == IEMMODE_64BIT   ? 8+2
+                                           : enmEffOpSize == IEMMODE_32BIT ? 4+2 : 2+2,
+                                           &pvRet, &uNewRsp);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /* Chop the high bits if 16-bit (Intel says so). */
+    if (enmEffOpSize == IEMMODE_16BIT)
+        offSeg &= UINT16_MAX;
+
+    /* Limit / canonical check. */
+    uint64_t u64Base;
+    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
+    if (Desc.Legacy.Gen.u1Granularity)
+        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+    {
+        if (!IEM_IS_CANONICAL(offSeg))
+        {
+            Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
+            return iemRaiseNotCanonical(pIemCpu);
+        }
+        u64Base = 0;
+    }
+    else
+    {
+        if (offSeg > cbLimit)
+        {
+            Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
+        }
+        u64Base = X86DESC_BASE(Desc.Legacy);
+    }
+
+    /*
+     * Now set the accessed bit before
+     * writing the return address to the stack and committing the result into
+     * CS, CSHID and RIP.
+     */
+    /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
+    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+    {
+        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
+        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+#endif
+    }
+
+    /* stack */
+    if (enmEffOpSize == IEMMODE_16BIT)
+    {
+        ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
+        ((uint16_t *)pvRet)[1] = pCtx->cs;
+    }
+    else if (enmEffOpSize == IEMMODE_32BIT)
+    {
+        ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
+        ((uint32_t *)pvRet)[1] = pCtx->cs;
+    }
+    else
+    {
+        ((uint64_t *)pvRet)[0] = pCtx->rip + cbInstr;
+        ((uint64_t *)pvRet)[1] = pCtx->cs;
+    }
+    rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /* commit */
+    pCtx->rip = offSeg;
+    pCtx->cs  = uSel & (X86_SEL_MASK | X86_SEL_LDT);
+    pCtx->cs |= pIemCpu->uCpl;
+    pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->csHid.u32Limit = cbLimit;
+    pCtx->csHid.u64Base  = u64Base;
+    /** @todo check if the hidden bits are loaded correctly for 64-bit
+     * mode. */
+    return VINF_SUCCESS;
 }
@@ -1032 +1256 @@ iemCImpl_retf
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
     VBOXSTRICTRC rcStrict;
+    RTCPTRUNION uPtrFrame;
     uint64_t uNewRsp;
+    uint64_t uNewRip;
+    uint16_t uNewCs;
     NOREF(cbInstr);
+
+    /*
+     * Read the stack values first.
+     */
+    uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
+                      : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
+    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+    if (enmEffOpSize == IEMMODE_16BIT)
+    {
+        uNewRip = uPtrFrame.pu16[0];
+        uNewCs  = uPtrFrame.pu16[1];
+    }
+    else if (enmEffOpSize == IEMMODE_32BIT)
+    {
+        uNewRip = uPtrFrame.pu32[0];
+        uNewCs  = uPtrFrame.pu16[2];
+    }
+    else
+    {
+        uNewRip = uPtrFrame.pu64[0];
+        uNewCs  = uPtrFrame.pu16[4];
+    }
 
     /*
@@ -1042 +1293 @@
     {
         Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
-        uint16_t const *pu16Frame;
-        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
-                                              (void const **)&pu16Frame, &uNewRsp);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-        uint32_t uNewEip;
-        uint16_t uNewCS;
-        if (enmEffOpSize == IEMMODE_32BIT)
-        {
-            uNewCS  = pu16Frame[2];
-            uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
-        }
-        else
-        {
-            uNewCS  = pu16Frame[1];
-            uNewEip = pu16Frame[0];
-        }
         /** @todo check how this is supposed to work if sp=0xfffe. */
@@ -1064 +1298 @@
         /** @todo Intel pseudo code only does the limit check for 16-bit
          *        operands, AMD does not make any distinction. What is right? */
-        if (uNewEip > pCtx->csHid.u32Limit)
+        if (uNewRip > pCtx->csHid.u32Limit)
             return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
 
         /* commit the operation. */
-        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
+        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, &uPtrFrame.pv, uNewRsp);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
-        pCtx->rip = uNewEip;
-        pCtx->cs  = uNewCS;
-        pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
+        pCtx->rip = uNewRip;
+        pCtx->cs  = uNewCs;
+        pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
         /** @todo do we load attribs and limit as well? */
         if (cbPop)
@@ -1080 +1314 @@
     }
 
-    AssertFailed();
-    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+    /*
+     * Protected mode is complicated, of course.
+     */
+    if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
+    {
+        Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+    }
+
+    /* Fetch the descriptor. */
+    IEMSELDESC DescCs;
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
+    if (rcStrict != VINF_SUCCESS)
+        return rcStrict;
+
+    /* Can only return to a code selector. */
+    if (   !DescCs.Legacy.Gen.u1DescType
+        || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
+    {
+        Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
+             uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* L vs D. */
+    if (   DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
+        && DescCs.Legacy.Gen.u1DefBig
+        && IEM_IS_LONG_MODE(pIemCpu))
+    {
+        Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    /* DPL/RPL/CPL checks. */
+    if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
+    {
+        Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+    }
+
+    if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
+    {
+        if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
+        {
+            Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
+                 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+        }
+    }
+    else
+    {
+        if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
+        {
+            Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
+                 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+        }
+    }
+
+    /* Is it there? */
+    if (!DescCs.Legacy.Gen.u1Present)
+    {
+        Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
+    }
+
+    /*
+     * Return to outer privilege? (We'll typically have entered via a call gate.)
+     */
+    if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
+    {
+        /* Read the return pointer, it comes before the parameters. */
+        RTCPTRUNION uPtrStack;
+        rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        uint16_t uNewOuterSs;
+        uint64_t uNewOuterRsp;
+        if (enmEffOpSize == IEMMODE_16BIT)
+        {
+            uNewOuterRsp = uPtrFrame.pu16[0];
+            uNewOuterSs  = uPtrFrame.pu16[1];
+        }
+        else if (enmEffOpSize == IEMMODE_32BIT)
+        {
+            uNewOuterRsp = uPtrFrame.pu32[0];
+            uNewOuterSs  = uPtrFrame.pu16[2];
+        }
+        else
+        {
+            uNewOuterRsp = uPtrFrame.pu64[0];
+            uNewOuterSs  = uPtrFrame.pu16[4];
+        }
+
+        /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
+           and read the selector. */
+        IEMSELDESC DescSs;
+        if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
+        {
+            if (   !DescCs.Legacy.Gen.u1Long
+                || (uNewOuterSs & X86_SEL_RPL) == 3)
+            {
+                Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
+                     uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
+                return iemRaiseGeneralProtectionFault0(pIemCpu);
+            }
+            /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
+            iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
+        }
+        else
+        {
+            /* Fetch the descriptor for the new stack segment. */
+            rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+        }
+
+        /* Check that RPL of stack and code selectors match. */
+        if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
+        {
+            Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
+        }
+
+        /* Must be a writable data segment. */
+        if (   !DescSs.Legacy.Gen.u1DescType
+            || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
+            || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
+        {
+            Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
+                 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
+        }
+
+        /* L vs D. (Not mentioned by intel.) */
+        if (   DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
+            && DescSs.Legacy.Gen.u1DefBig
+            && IEM_IS_LONG_MODE(pIemCpu))
+        {
+            Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
+                 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
+        }
+
+        /* DPL/RPL/CPL checks. */
+        if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
+        {
+            Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
+                 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
+            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
+        }
+
+        /* Is it there? */
+        if (!DescSs.Legacy.Gen.u1Present)
+        {
+            Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
+            return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
+        }
+
+        /* Calc SS limit. */
+        uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
+        if (DescSs.Legacy.Gen.u1Granularity)
+            cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+
+        /* Is RIP canonical or within CS.limit? */
+        uint64_t u64Base;
+        uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
+        if (DescCs.Legacy.Gen.u1Granularity)
+            cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+        if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+        {
+            if (!IEM_IS_CANONICAL(uNewRip))
+            {
+                Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
+                return iemRaiseNotCanonical(pIemCpu);
+            }
+            u64Base = 0;
+        }
+        else
+        {
+            if (uNewRip > cbLimitCs)
+            {
+                Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
+                     uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+            }
+            u64Base = X86DESC_BASE(DescCs.Legacy);
+        }
+
+        /*
+         * Now set the accessed bit before
+         * writing the return address to the stack and committing the result into
+         * CS, CSHID and RIP.
+         */
+        /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
+        if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
+            DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+#endif
+        }
+        /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
+        if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
+            DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+#endif
+        }
+
+        /* commit */
+        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, &uPtrFrame.pv, uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        if (enmEffOpSize == IEMMODE_16BIT)
+            pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
+        else
+            pCtx->rip = uNewRip;
+        pCtx->cs             = uNewCs;
+        pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+        pCtx->csHid.u32Limit = cbLimitCs;
+        pCtx->csHid.u64Base  = u64Base;
+        pCtx->rsp            = uNewRsp;
+        pCtx->ss             = uNewCs;
+        pCtx->ssHid.Attr.u   = X86DESC_GET_HID_ATTR(DescSs.Legacy);
+        pCtx->ssHid.u32Limit = cbLimitSs;
+        if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+            pCtx->ssHid.u64Base = 0;
+        else
+            pCtx->ssHid.u64Base = X86DESC_BASE(DescSs.Legacy);
+
+        pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
+
+        /** @todo check if the hidden bits are loaded correctly for 64-bit
+         *        mode. */
+
+        if (cbPop)
+            iemRegAddToRsp(pCtx, cbPop);
+
+        /* Done! */
+    }
+    /*
+     * Return to the same privilege level
+     */
+    else
+    {
+        /* Limit / canonical check. */
+        uint64_t u64Base;
+        uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
+        if (DescCs.Legacy.Gen.u1Granularity)
+            cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+
+        if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
+        {
+            if (!IEM_IS_CANONICAL(uNewRip))
+            {
+                Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
+                return iemRaiseNotCanonical(pIemCpu);
+            }
+            u64Base = 0;
+        }
+        else
+        {
+            if (uNewRip > cbLimitCs)
+            {
+                Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
+                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
+            }
+            u64Base = X86DESC_BASE(DescCs.Legacy);
+        }
+
+        /*
+         * Now set the accessed bit before
+         * writing the return address to the stack and committing the result into
+         * CS, CSHID and RIP.
+         */
+        /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
+        if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
+        {
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
+            DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
+#endif
+        }
+
+        /* commit */
+        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, &uPtrFrame.pv, uNewRsp);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        if (enmEffOpSize == IEMMODE_16BIT)
+            pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
+        else
+            pCtx->rip = uNewRip;
+        pCtx->cs             = uNewCs;
+        pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+        pCtx->csHid.u32Limit = cbLimitCs;
+        pCtx->csHid.u64Base  = u64Base;
+        /** @todo check if the hidden bits are loaded correctly for 64-bit
+         *        mode. */
+        if (cbPop)
+            iemRegAddToRsp(pCtx, cbPop);
+    }
+    return VINF_SUCCESS;
 }
@@ -1251 +1799 @@
     VBOXSTRICTRC rcStrict;
     RTCPTRUNION uFrame;
-    uint16_t uNewCS;
+    uint16_t uNewCs;
     uint32_t uNewEip;
     uint32_t uNewFlags;
@@ -1261 +1809 @@
         return rcStrict;
     uNewEip   = uFrame.pu32[0];
-    uNewCS    = (uint16_t)uFrame.pu32[1];
+    uNewCs    = (uint16_t)uFrame.pu32[1];
     uNewFlags = uFrame.pu32[2];
     uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
@@ -1275 +1823 @@
        return rcStrict;
    uNewEip   = uFrame.pu16[0];
-    uNewCS    = uFrame.pu16[1];
+    uNewCs    = uFrame.pu16[1];
    uNewFlags = uFrame.pu16[2];
    uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
@@ -1326 +1874 @@
        return rcStrict;
    pCtx->rip = uNewEip;
-    pCtx->cs  = uNewCS;
-    pCtx->csHid.u64Base = (uint32_t)uNewCS << 4;
+    pCtx->cs  = uNewCs;
+    pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
    /** @todo do we load attribs and limit as well? */
    Assert(uNewFlags & X86_EFL_1);
@@ -1365 +1913 @@
    VBOXSTRICTRC rcStrict;
    RTCPTRUNION uFrame;
-    uint16_t uNewCS;
+    uint16_t uNewCs;
    uint32_t uNewEip;
    uint32_t uNewFlags;
@@ -1375 +1923 @@
            return rcStrict;
        uNewEip   = uFrame.pu32[0];
-        uNewCS    = (uint16_t)uFrame.pu32[1];
+        uNewCs    = (uint16_t)uFrame.pu32[1];
        uNewFlags = uFrame.pu32[2];
    }
@@ -1384 +1932 @@
            return rcStrict;
        uNewEip   = uFrame.pu16[0];
-        uNewCS    = uFrame.pu16[1];
+        uNewCs    = uFrame.pu16[1];
        uNewFlags = uFrame.pu16[2];
    }
@@ -1406 +1954 @@
     */
    /* Read the CS descriptor. */
-    if (!(uNewCS & (X86_SEL_MASK | X86_SEL_LDT)))
+    if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
    {
-        Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCS, uNewEip));
+        Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    }
 
    IEMSELDESC DescCS;
-    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS);
+    rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
    if (rcStrict != VINF_SUCCESS)
    {
-        Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCS, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
+        Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
@@ -1423 +1971 @@
    if (!DescCS.Legacy.Gen.u1DescType)
    {
-        Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
    }
    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
    {
-        Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
    }
 
    /* Privilege checks. */
-    if ((uNewCS & X86_SEL_RPL) < pIemCpu->uCpl)
+    if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
    {
-        Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCS, uNewEip, pIemCpu->uCpl));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
    }
    if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
-        && (uNewCS & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
+        && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
    {
-        Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u2Dpl));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
+        Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
+        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
    }
@@ -1448 +1996 @@
    if (!DescCS.Legacy.Gen.u1Present)
    {
-        Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCS, uNewEip));
-        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
+        Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
+        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
    }
@@ -1459 +2007 @@
     * Return to outer level?
     */
-    if ((uNewCS & X86_SEL_RPL) != pIemCpu->uCpl)
+    if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
    {
        uint16_t uNewSS;
@@ -1486 +2034 @@
        if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
        {
-            Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
            return iemRaiseGeneralProtectionFault0(pIemCpu);
        }
@@ -1495 +2043 @@
        {
            Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
-                 uNewCS, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
+                 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
 
        /* Privilege checks. */
-        if ((uNewSS & X86_SEL_RPL) != (uNewCS & X86_SEL_RPL))
+        if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
        {
-            Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
        }
-        if (DescSS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
+        if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
        {
            Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
-                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
        }
@@ -1516 +2064 @@
        {
            Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
-                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
        }
@@ -1522 +2070 @@
        {
            Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
-                 uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
+                 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
        }
@@ -1529 +2077 @@
        if (!DescSS.Legacy.Gen.u1Present)
        {
-            Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCS, uNewEip, uNewSS, uNewESP));
+            Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
            return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
        }
 
-        uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
+        uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
        if (DescSS.Legacy.Gen.u1Granularity)
-            cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+            cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
 
        /* Check EIP. */
@@ -1541 +2089 @@
        {
            Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
-                 uNewCS, uNewEip, uNewSS, uNewESP, cbLimitCS));
-            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
+                 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
        }
@@ -1551 +2099 @@
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
@@ -1565 +2113 @@
 
        pCtx->rip            = uNewEip;
-        pCtx->cs             = uNewCS;
+        pCtx->cs             = uNewCs;
        pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
        pCtx->csHid.u32Limit = cbLimitCS;
@@ -1572 +2120 @@
        pCtx->ss             = uNewSS;
        pCtx->ssHid.Attr.u   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
-        pCtx->ssHid.u32Limit = cbLimitSS;
+        pCtx->ssHid.u32Limit = cbLimitSs;
        pCtx->ssHid.u64Base  = X86DESC_BASE(DescSS.Legacy);
@@ -1586 +2134 @@
        pCtx->eflags.u |= fEFlagsMask & uNewFlags;
 
-        pIemCpu->uCpl = uNewCS & X86_SEL_RPL;
-        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
-        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
-        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
-        iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
+        pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
+        iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
 
        /* Done! */
@@ -1603 +2151 @@
        if (uNewEip > cbLimitCS)
        {
-            Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewEip, cbLimitCS));
-            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
+            Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
+            return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
        }
@@ -1612 +2160 @@
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
-            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
+            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
@@ -1619 +2167 @@
 
        pCtx->rip            = uNewEip;
-        pCtx->cs             = uNewCS;
+        pCtx->cs             = uNewCs;
        pCtx->csHid.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
        pCtx->csHid.u32Limit = cbLimitCS;
@@ -1874 +2422 @@
    /* commit */
    *pSel = uSel;
-    pHid->Attr.u   = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
+    pHid->Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
    pHid->u32Limit = cbLimit;
    pHid->u64Base  = u64Base;
trunk/src/VBox/VMM/include/IEMInternal.h
(diff from r40165 to r40182)

@@ -68 +68 @@
 } IEMMODEX;
 AssertCompileSize(IEMMODEX, 4);
+
+
+/**
+ * Branch types.
+ */
+typedef enum IEMBRANCH
+{
+    IEMBRANCH_JUMP = 1,
+    IEMBRANCH_CALL,
+    IEMBRANCH_TRAP,
+    IEMBRANCH_SOFTWARE_INT,
+    IEMBRANCH_HARDWARE_INT
+} IEMBRANCH;
+AssertCompileSize(IEMBRANCH, 4);
 
 
@@ -1012 +1026 @@
  * @param   a_Arg3  The name of the 4th argument.
  */
-# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, aArg3) \
-    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PIEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
+# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
+    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PIEMCPU pIemCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
+                                             a_Type2 a_Arg2, a_Type3 a_Arg3))
 /**
  * For calling a C instruction implementation function taking four extra
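The second hunk is not just a reflow: the macro's fourth parameter was declared aArg3 while the expansion used a_Arg3, and the preprocessor only substitutes declared parameter names. A sketch of the failure mode (iemCImpl_Example is hypothetical):

    /* With the old spelling, this definition ...                        */
    IEM_CIMPL_DEF_4(iemCImpl_Example, uint16_t, uSel, IEMBRANCH, enmBranch,
                    IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
    {
        return pDesc ? VINF_SUCCESS : VINF_SUCCESS; /* error: 'pDesc' undeclared */
    }
    /* ... would have expanded to a function whose last parameter was
     * literally named 'a_Arg3', so the name supplied by the caller was
     * silently dropped.  The iemCImpl_BranchSysSel worker added in this
     * changeset, which references pDesc, is the kind of user that would
     * not have compiled before the fix. */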
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
(diff from r40176 to r40182)

@@ -160 +160 @@
 #define iemAImpl_imul_u8        ((PFNIEMAIMPLMULDIVU8)0)
 #define iemAImpl_mul_u8         ((PFNIEMAIMPLMULDIVU8)0)
+
+#define iemAImpl_fpu_r64_to_r80         NULL
+#define iemAImpl_fadd_r80_by_r64        NULL
+#define iemAImpl_fmul_r80_by_r64        NULL
+#define iemAImpl_fcom_r80_by_r64        NULL
+#define iemAImpl_fsub_r80_by_r64        NULL
+#define iemAImpl_fsubr_r80_by_r64       NULL
+#define iemAImpl_fdiv_r80_by_r64        NULL
+#define iemAImpl_fdivr_r80_by_r64       NULL
+
+#define iemAImpl_fadd_r80_by_r80        NULL
+#define iemAImpl_fmul_r80_by_r80        NULL
+#define iemAImpl_fcom_r80_by_r80        NULL
+#define iemAImpl_fsub_r80_by_r80        NULL
+#define iemAImpl_fsubr_r80_by_r80       NULL
+#define iemAImpl_fdiv_r80_by_r80        NULL
+#define iemAImpl_fdivr_r80_by_r80       NULL
+
+#define iemCImpl_callf                  NULL
+#define iemCImpl_FarJmp                 NULL
 
 /** @} */
@@ -444 +464 @@
 #define IEM_MC_ENDIF() } do {} while (0)
 
-
-#define iemAImpl_fpu_r64_to_r80         NULL
-#define iemAImpl_fadd_r80_by_r64        NULL
-#define iemAImpl_fmul_r80_by_r64        NULL
-#define iemAImpl_fcom_r80_by_r64        NULL
-#define iemAImpl_fsub_r80_by_r64        NULL
-#define iemAImpl_fsubr_r80_by_r64       NULL
-#define iemAImpl_fdiv_r80_by_r64        NULL
-#define iemAImpl_fdivr_r80_by_r64       NULL
-
-#define iemAImpl_fadd_r80_by_r80        NULL
-#define iemAImpl_fmul_r80_by_r80        NULL
-#define iemAImpl_fcom_r80_by_r80        NULL
-#define iemAImpl_fsub_r80_by_r80        NULL
-#define iemAImpl_fsubr_r80_by_r80       NULL
-#define iemAImpl_fdiv_r80_by_r80        NULL
-#define iemAImpl_fdivr_r80_by_r80       NULL
-
-#define iemCImpl_callf                  NULL
-#define iemCImpl_FarJmp                 NULL
-
 /** @} */
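This testcase compiles the instruction decoder with the IEM_MC_* statements redefined as empty shells (see the IEM_MC_ENDIF stub in the second hunk) and every worker function stubbed out, so it only typechecks macro usage; the change simply moves the stub defines up into the common stub list. The pattern, with a hypothetical worker as an example:

    /* Any worker the decoder tables reference but the checker never
     * actually calls is stubbed to NULL, e.g. (hypothetical addition): */
    #define iemCImpl_retf   NULL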