Changeset 94617 in vbox for trunk/src/VBox
- Timestamp:
- Apr 15, 2022 1:02:18 PM (3 years ago)
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r94051 r94617 87 87 */ 88 88 /** @todo Test the assertion in the intel manual that the CPU reads two 89 * bytes. The question is how this works wrt to #PF and#GP on the89 * bytes. The question is how this works wrt to \#PF and \#GP on the 90 90 * 2nd byte when it's not required. */ 91 91 uint16_t bmBytes = UINT16_MAX; … … 664 664 return rcStrict; 665 665 666 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP666 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP 667 667 * or before? */ 668 668 if ( ( (u16Value & X86_EFL_IF) … … 768 768 * @param uNewPC The new program counter (RIP) value (loaded from the 769 769 * operand). 770 * @param enmEffOpSize The effective operand size.771 770 */ 772 771 IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC) … … 823 822 * @param uNewPC The new program counter (RIP) value (loaded from the 824 823 * operand). 825 * @param enmEffOpSize The effective operand size.826 824 */ 827 825 IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC) … … 878 876 * @param uNewPC The new program counter (RIP) value (loaded from the 879 877 * operand). 880 * @param enmEffOpSize The effective operand size.881 878 */ 882 879 IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC) … … 1068 1065 * must be 16-bit or 32-bit. 1069 1066 */ 1070 /** @todo :effective operand size is probably irrelevant here, only the1071 * 1067 /** @todo effective operand size is probably irrelevant here, only the 1068 * call gate bitness matters?? 1072 1069 */ 1073 1070 VBOXSTRICTRC rcStrict; … … 1182 1179 if (enmBranch == IEMBRANCH_JUMP) 1183 1180 { 1184 /** @todo :This is very similar to regular far jumps; merge! */1181 /** @todo This is very similar to regular far jumps; merge! */ 1185 1182 /* Jumps are fairly simple... */ 1186 1183 … … 1385 1382 } 1386 1383 1387 /** @todo :According to Intel, new stack is checked for enough space first,1388 * 1389 * 1390 * 1391 * incoming stack#PF happens before actual stack switch. 
AMD is1392 * 1393 * 1384 /** @todo According to Intel, new stack is checked for enough space first, 1385 * then switched. According to AMD, the stack is switched first and 1386 * then pushes might fault! 1387 * NB: OS/2 Warp 3/4 actively relies on the fact that possible 1388 * incoming stack \#PF happens before actual stack switch. AMD is 1389 * either lying or implicitly assumes that new state is committed 1390 * only if and when an instruction doesn't fault. 1394 1391 */ 1395 1392 1396 /** @todo :According to AMD, CS is loaded first, then SS.1397 * 1393 /** @todo According to AMD, CS is loaded first, then SS. 1394 * According to Intel, it's the other way around!? 1398 1395 */ 1399 1396 1400 /** @todo :Intel and AMD disagree on when exactly the CPL changes! */1397 /** @todo Intel and AMD disagree on when exactly the CPL changes! */ 1401 1398 1402 1399 /* Set the accessed bit before committing new SS. */ … … 1598 1595 { 1599 1596 /* Same privilege. */ 1600 /** @todo :This is very similar to regular far calls; merge! */1597 /** @todo This is very similar to regular far calls; merge! */ 1601 1598 1602 1599 /* Check stack first - may #SS(0). */ … … 1893 1890 { 1894 1891 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit)); 1895 /** @todo : Intel says this is#GP(0)! */1892 /** @todo Intel says this is \#GP(0)! */ 1896 1893 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 1897 1894 } … … 2097 2094 { 2098 2095 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit)); 2099 /** @todo : Intel says this is#GP(0)! */2096 /** @todo Intel says this is \#GP(0)! */ 2100 2097 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel); 2101 2098 } … … 2428 2425 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n", 2429 2426 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs)); 2430 /** @todo : Intel says this is#GP(0)! */2427 /** @todo Intel says this is \#GP(0)! 
*/ 2431 2428 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2432 2429 } … … 2524 2521 { 2525 2522 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs)); 2526 /** @todo : Intel says this is#GP(0)! */2523 /** @todo Intel says this is \#GP(0)! */ 2527 2524 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs); 2528 2525 } … … 2661 2658 * 2662 2659 * @param enmEffOpSize The effective operand size. 2660 * @param cbFrame Frame size. 2661 * @param cParameters Frame parameter count. 2663 2662 */ 2664 2663 IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters) … … 3354 3353 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n", 3355 3354 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS)); 3356 /** @todo : Which is it, #GP(0) or#GP(sel)? */3355 /** @todo Which is it, \#GP(0) or \#GP(sel)? */ 3357 3356 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3358 3357 } … … 3435 3434 { 3436 3435 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS)); 3437 /** @todo : Which is it, #GP(0) or#GP(sel)? */3436 /** @todo Which is it, \#GP(0) or \#GP(sel)? */ 3438 3437 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3439 3438 } … … 3709 3708 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n", 3710 3709 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS)); 3711 /** @todo : Which is it, #GP(0) or#GP(sel)? */3710 /** @todo Which is it, \#GP(0) or \#GP(sel)? */ 3712 3711 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs); 3713 3712 } … … 4862 4861 * 4863 4862 * @returns VINF_SUCCESS. 4864 * @param pu 16Dst Pointer to the destination register.4863 * @param pu64Dst Pointer to the destination register. 4865 4864 * @param uSel The selector to load details for. 4866 4865 * @param fIsLar true = LAR, false = LSL. … … 4969 4968 * @returns VINF_SUCCESS. 4970 4969 * @param pu16Dst Pointer to the destination register. 
4971 * @param u 16SelThe selector to load details for.4970 * @param uSel The selector to load details for. 4972 4971 * @param fIsLar true = LAR, false = LSL. 4973 4972 */ … … 5311 5310 * Implements sldt mem. 5312 5311 * 5313 * @param iGReg The general register to store the CRx value in.5314 5312 * @param iEffSeg The effective segment register to use with @a GCPtrMem. 5315 5313 * @param GCPtrEffDst Where to store the 16-bit CR0 value. … … 5489 5487 * Implements str mem. 5490 5488 * 5491 * @param iGReg The general register to store the CRx value in.5492 5489 * @param iEffSeg The effective segment register to use with @a GCPtrMem. 5493 5490 * @param GCPtrEffDst Where to store the 16-bit CR0 value. … … 5682 5679 * Implements smsw mem. 5683 5680 * 5684 * @param iGReg The general register to store the CR0 value in.5685 5681 * @param iEffSeg The effective segment register to use with @a GCPtrMem. 5686 5682 * @param GCPtrEffDst Where to store the 16-bit CR0 value. … … 5739 5735 * @param iCrReg The CRx register to write (valid). 5740 5736 * @param uNewCrX The new value. 5741 * @param enmAccessCr xThe instruction that caused the CrX load.5737 * @param enmAccessCrX The instruction that caused the CrX load. 5742 5738 * @param iGReg The general register in case of a 'mov CRx,GReg' 5743 5739 * instruction. … … 8380 8376 * Implements 'FXRSTOR'. 8381 8377 * 8378 * @param iEffSeg The effective segment register for @a GCPtrEff. 8382 8379 * @param GCPtrEff The address of the image. 8383 8380 * @param enmEffOpSize The operand size (only REX.W really matters). … … 8869 8866 * Implements 'STMXCSR'. 8870 8867 * 8868 * @param iEffSeg The effective segment register for @a GCPtrEff. 8871 8869 * @param GCPtrEff The address of the image. 8872 8870 */ … … 8903 8901 * Implements 'VSTMXCSR'. 8904 8902 * 8903 * @param iEffSeg The effective segment register for @a GCPtrEff. 8905 8904 * @param GCPtrEff The address of the image. 8906 8905 */ … … 8939 8938 * Implements 'LDMXCSR'. 
8940 8939 * 8940 * @param iEffSeg The effective segment register for @a GCPtrEff. 8941 8941 * @param GCPtrEff The address of the image. 8942 8942 */ … … 9123 9123 * 9124 9124 * @param enmEffOpSize The operand size (only REX.W really matters). 9125 * @param iEffSeg The effective segment register for @a GCPtrEff .9125 * @param iEffSeg The effective segment register for @a GCPtrEffDst. 9126 9126 * @param GCPtrEffDst The address of the image. 9127 9127 */ … … 9149 9149 * Implements 'FNSAVE'. 9150 9150 * 9151 * @param enmEffOpSize The operand size. 9152 * @param iEffSeg The effective segment register for @a GCPtrEffDst. 9151 9153 * @param GCPtrEffDst The address of the image. 9152 * @param enmEffOpSize The operand size.9153 9154 */ 9154 9155 IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) … … 9201 9202 * 9202 9203 * @param enmEffOpSize The operand size (only REX.W really matters). 9203 * @param iEffSeg The effective segment register for @a GCPtrEff .9204 * @param iEffSeg The effective segment register for @a GCPtrEffSrc. 9204 9205 * @param GCPtrEffSrc The address of the image. 9205 9206 */ … … 9227 9228 * Implements 'FRSTOR'. 9228 9229 * 9230 * @param enmEffOpSize The operand size. 9231 * @param iEffSeg The effective segment register for @a GCPtrEffSrc. 9229 9232 * @param GCPtrEffSrc The address of the image. 9230 * @param enmEffOpSize The operand size.9231 9233 */ 9232 9234 IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) … … 9337 9339 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'. 9338 9340 * 9339 * @param cToAdd 1 or 7. 9341 * @param iStReg The other stack register. 9342 * @param pfnAImpl The assembly comparison implementation. 9343 * @param fPop Whether we should pop the stack when done or not. 9340 9344 */ 9341 9345 IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop) -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r94538 r94617 1034 1034 * @optest amd / efl|=af ax=0x0000 -> ax=0xfe0a efl&|=cf,po,af,nz,ng,nv 1035 1035 * @optest intel / efl|=af ax=0x0100 -> ax=0xff0a efl&|=cf,po,af,nz,pl,nv 1036 * @optest 8amd / efl|=af ax=0x0100 -> ax=0xff0a efl&|=cf,po,af,nz,ng,nv1036 * @optest amd / efl|=af ax=0x0100 -> ax=0xff0a efl&|=cf,po,af,nz,ng,nv 1037 1037 * @optest intel / efl|=af ax=0x000a -> ax=0xff04 efl&|=cf,pe,af,nz,pl,nv 1038 * @optest 10amd / efl|=af ax=0x000a -> ax=0xff04 efl&|=cf,pe,af,nz,ng,nv1038 * @optest amd / efl|=af ax=0x000a -> ax=0xff04 efl&|=cf,pe,af,nz,ng,nv 1039 1039 * @optest / efl|=af ax=0x010a -> ax=0x0004 efl&|=cf,pe,af,nz,pl,nv 1040 1040 * @optest / efl|=af ax=0x020a -> ax=0x0104 efl&|=cf,pe,af,nz,pl,nv … … 1048 1048 * @optest amd / efl&~=af ax=0xff09 -> ax=0xff09 efl&|=nc,po,na,nz,ng,nv 1049 1049 * @optest intel / efl&~=af ax=0x000b -> ax=0xff05 efl&|=cf,po,af,nz,pl,nv 1050 * @optest 22amd / efl&~=af ax=0x000b -> ax=0xff05 efl&|=cf,po,af,nz,ng,nv1050 * @optest amd / efl&~=af ax=0x000b -> ax=0xff05 efl&|=cf,po,af,nz,ng,nv 1051 1051 * @optest intel / efl&~=af ax=0x000c -> ax=0xff06 efl&|=cf,po,af,nz,pl,nv 1052 * @optest 24amd / efl&~=af ax=0x000c -> ax=0xff06 efl&|=cf,po,af,nz,ng,nv1052 * @optest amd / efl&~=af ax=0x000c -> ax=0xff06 efl&|=cf,po,af,nz,ng,nv 1053 1053 * @optest intel / efl&~=af ax=0x000d -> ax=0xff07 efl&|=cf,pe,af,nz,pl,nv 1054 * @optest 26amd / efl&~=af ax=0x000d -> ax=0xff07 efl&|=cf,pe,af,nz,ng,nv1054 * @optest amd / efl&~=af ax=0x000d -> ax=0xff07 efl&|=cf,pe,af,nz,ng,nv 1055 1055 * @optest intel / efl&~=af ax=0x000e -> ax=0xff08 efl&|=cf,pe,af,nz,pl,nv 1056 * @optest 28amd / efl&~=af ax=0x000e -> ax=0xff08 efl&|=cf,pe,af,nz,ng,nv1056 * @optest amd / efl&~=af ax=0x000e -> ax=0xff08 efl&|=cf,pe,af,nz,ng,nv 1057 1057 * @optest intel / efl&~=af ax=0x000f -> ax=0xff09 efl&|=cf,po,af,nz,pl,nv 1058 * @optest 30amd / efl&~=af ax=0x000f -> ax=0xff09 efl&|=cf,po,af,nz,ng,nv1059 * @optest 31intel / efl&~=af ax=0x00fa -> ax=0xff04 
efl&|=cf,pe,af,nz,pl,nv1060 * @optest 32amd / efl&~=af ax=0x00fa -> ax=0xff04 efl&|=cf,pe,af,nz,ng,nv1061 * @optest 33intel / efl&~=af ax=0xfffa -> ax=0xfe04 efl&|=cf,pe,af,nz,pl,nv1062 * @optest 34amd / efl&~=af ax=0xfffa -> ax=0xfe04 efl&|=cf,pe,af,nz,ng,nv1058 * @optest amd / efl&~=af ax=0x000f -> ax=0xff09 efl&|=cf,po,af,nz,ng,nv 1059 * @optest intel / efl&~=af ax=0x00fa -> ax=0xff04 efl&|=cf,pe,af,nz,pl,nv 1060 * @optest amd / efl&~=af ax=0x00fa -> ax=0xff04 efl&|=cf,pe,af,nz,ng,nv 1061 * @optest intel / efl&~=af ax=0xfffa -> ax=0xfe04 efl&|=cf,pe,af,nz,pl,nv 1062 * @optest amd / efl&~=af ax=0xfffa -> ax=0xfe04 efl&|=cf,pe,af,nz,ng,nv 1063 1063 */ 1064 1064 FNIEMOP_DEF(iemOp_aas) … … 7081 7081 * result in ST0. 7082 7082 * 7083 * @param bRm Mod R/M byte. 7083 7084 * @param pfnAImpl Pointer to the instruction implementation (assembly). 7084 7085 */ … … 7113 7114 * flags. 7114 7115 * 7116 * @param bRm Mod R/M byte. 7115 7117 * @param pfnAImpl Pointer to the instruction implementation (assembly). 7116 7118 */ … … 7145 7147 * flags, and popping when done. 7146 7148 * 7149 * @param bRm Mod R/M byte. 7147 7150 * @param pfnAImpl Pointer to the instruction implementation (assembly). 7148 7151 */ … … 7241 7244 * the result in ST0. 7242 7245 * 7246 * @param bRm Mod R/M byte. 7243 7247 * @param pfnAImpl Pointer to the instruction implementation (assembly). 7244 7248 */ … … 7940 7944 * in STn, and popping the stack unless IE, DE or ZE was raised. 7941 7945 * 7946 * @param bRm Mod R/M byte. 7942 7947 * @param pfnAImpl Pointer to the instruction implementation (assembly). 7943 7948 */ … … 8393 8398 * the result in ST0. 8394 8399 * 8400 * @param bRm Mod R/M byte. 8395 8401 * @param pfnAImpl Pointer to the instruction implementation (assembly). 8396 8402 */ … … 9061 9067 * result in STn unless IE, DE or ZE was raised. 9062 9068 * 9069 * @param bRm Mod R/M byte. 9063 9070 * @param pfnAImpl Pointer to the instruction implementation (assembly). 
9064 9071 */ … … 9142 9149 * memory operand, and storing the result in ST0. 9143 9150 * 9144 * @param pfnAImpl Pointer to the instruction implementation (assembly). 9151 * @param bRm Mod R/M byte. 9152 * @param pfnImpl Pointer to the instruction implementation (assembly). 9145 9153 */ 9146 9154 FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl) … … 9702 9710 * the result in ST0. 9703 9711 * 9712 * @param bRm Mod R/M byte. 9704 9713 * @param pfnAImpl Pointer to the instruction implementation (assembly). 9705 9714 */ … … 10380 10389 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); 10381 10390 10382 /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when10391 /** @todo Check out the \#GP case if EIP < CS.Base or EIP > CS.Limit when 10383 10392 * using the 32-bit operand size override. How can that be restarted? See 10384 10393 * weird pseudo code in intel manual. */ … … 10703 10712 { 10704 10713 IEMOP_MNEMONIC(int1, "int1"); /* icebp */ 10705 /** @todo Does not generate #UD on 286, or so they say... Was allegedly a10714 /** @todo Does not generate \#UD on 286, or so they say... Was allegedly a 10706 10715 * prefix byte on 8086 and/or/maybe 80286 without meaning according to the 286 10707 10716 * LOADALL memo. Needs some testing. */ -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap2.cpp.h
r93115 r94617 560 560 561 561 /** 562 * Group 17 jump table for the VEX.F3 variant. .562 * Group 17 jump table for the VEX.F3 variant. 563 563 */ 564 564 IEM_STATIC const PFNIEMOPRM g_apfnVexGroup17_f3[] = -
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r94610 r94617 2221 2221 pExit->VpException.InstructionByteCount)) 2222 2222 { 2223 #if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the #GP... */2223 #if 1 /** @todo Need to emulate instruction or we get a triple fault when trying to inject the \#GP... */ 2224 2224 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pExit->VpContext.Rip, 2225 2225 pExit->VpException.InstructionBytes,
Note: See TracChangeset for help on using the changeset viewer.