Changeset 102579 in vbox
- Timestamp:
- Dec 12, 2023 12:11:24 AM (15 months ago)
- svn:sync-xref-src-repo-rev:
- 160727
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r102577 r102579 3070 3070 'IEM_MC_PUSH_FPU_RESULT_MEM_OP': (McBlock.parseMcGeneric, True, False, ), 3071 3071 'IEM_MC_PUSH_FPU_RESULT_TWO': (McBlock.parseMcGeneric, True, False, ), 3072 'IEM_MC_PUSH_U16': (McBlock.parseMcGeneric, True, False,),3073 'IEM_MC_PUSH_U32': (McBlock.parseMcGeneric, True, False,),3074 'IEM_MC_PUSH_U32_SREG': (McBlock.parseMcGeneric, True, False,),3075 'IEM_MC_PUSH_U64': (McBlock.parseMcGeneric, True, False,),3072 'IEM_MC_PUSH_U16': (McBlock.parseMcGeneric, True, True, ), 3073 'IEM_MC_PUSH_U32': (McBlock.parseMcGeneric, True, True, ), 3074 'IEM_MC_PUSH_U32_SREG': (McBlock.parseMcGeneric, True, True, ), 3075 'IEM_MC_PUSH_U64': (McBlock.parseMcGeneric, True, True, ), 3076 3076 'IEM_MC_RAISE_DIVIDE_ERROR': (McBlock.parseMcGeneric, True, False, ), 3077 3077 'IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO': (McBlock.parseMcGeneric, True, False, ), -
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r102577 r102579 174 174 175 175 # Flat Stack: 176 'IEM_MC_FLAT64_PUSH_U16': (None, True, False,),177 'IEM_MC_FLAT64_PUSH_U64': (None, True, False,),176 'IEM_MC_FLAT64_PUSH_U16': (None, True, True, ), 177 'IEM_MC_FLAT64_PUSH_U64': (None, True, True, ), 178 178 'IEM_MC_FLAT64_POP_GREG_U16': (None, True, True, ), 179 179 'IEM_MC_FLAT64_POP_GREG_U64': (None, True, True, ), 180 'IEM_MC_FLAT32_PUSH_U16': (None, True, False,),181 'IEM_MC_FLAT32_PUSH_U32': (None, True, False,),180 'IEM_MC_FLAT32_PUSH_U16': (None, True, True, ), 181 'IEM_MC_FLAT32_PUSH_U32': (None, True, True, ), 182 182 'IEM_MC_FLAT32_POP_GREG_U16': (None, True, True, ), 183 183 'IEM_MC_FLAT32_POP_GREG_U32': (None, True, True, ), -
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r102577 r102579 9902 9902 /* RT_MAKE_U32_FROM_U8(cBitsVar, cBitsFlat, fSReg, 0) */ 9903 9903 #define IEM_MC_PUSH_U16(a_u16Value) \ 9904 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 0, 0, 0), (uintptr_t)iemNativeHlpStackPushU16) 9904 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 0, 0, 0), \ 9905 (uintptr_t)iemNativeHlpStackPushU16, pCallEntry->idxInstr) 9905 9906 #define IEM_MC_PUSH_U32(a_u32Value) \ 9906 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 0, 0, 0), (uintptr_t)iemNativeHlpStackPushU32) 9907 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 0, 0, 0), \ 9908 (uintptr_t)iemNativeHlpStackPushU32, pCallEntry->idxInstr) 9907 9909 #define IEM_MC_PUSH_U32_SREG(a_uSegVal) \ 9908 off = iemNativeEmitStackPush(pReNative, off, a_uSegVal, RT_MAKE_U32_FROM_U8(32, 0, 1, 0), (uintptr_t)iemNativeHlpStackPushU32SReg) 9910 off = iemNativeEmitStackPush(pReNative, off, a_uSegVal, RT_MAKE_U32_FROM_U8(32, 0, 1, 0), \ 9911 (uintptr_t)iemNativeHlpStackPushU32SReg, pCallEntry->idxInstr) 9909 9912 #define IEM_MC_PUSH_U64(a_u64Value) \ 9910 off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 0, 0, 0), (uintptr_t)iemNativeHlpStackPushU64) 9913 off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 0, 0, 0), \ 9914 (uintptr_t)iemNativeHlpStackPushU64, pCallEntry->idxInstr) 9911 9915 9912 9916 #define IEM_MC_FLAT32_PUSH_U16(a_u16Value) \ 9913 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), (uintptr_t)iemNativeHlpStackFlat32PushU16) 9917 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), \ 9918 (uintptr_t)iemNativeHlpStackFlat32PushU16, pCallEntry->idxInstr) 9914 9919 #define IEM_MC_FLAT32_PUSH_U32(a_u32Value) \ 9915 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), 
(uintptr_t)iemNativeHlpStackFlat32PushU32) 9920 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), \ 9921 (uintptr_t)iemNativeHlpStackFlat32PushU32, pCallEntry->idxInstr) 9916 9922 #define IEM_MC_FLAT32_PUSH_U32_SREG(a_u32Value) \ 9917 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 1, 0), (uintptr_t)iemNativeHlpStackFlat32PushU32SReg) 9923 off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 1, 0), \ 9924 (uintptr_t)iemNativeHlpStackFlat32PushU32SReg, pCallEntry->idxInstr) 9918 9925 9919 9926 #define IEM_MC_FLAT64_PUSH_U16(a_u16Value) \ 9920 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), (uintptr_t)iemNativeHlpStackFlat64PushU16) 9927 off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), \ 9928 (uintptr_t)iemNativeHlpStackFlat64PushU16, pCallEntry->idxInstr) 9921 9929 #define IEM_MC_FLAT64_PUSH_U64(a_u64Value) \ 9922 off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), (uintptr_t)iemNativeHlpStackFlat64PushU64) 9923 9930 off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), \ 9931 (uintptr_t)iemNativeHlpStackFlat64PushU64, pCallEntry->idxInstr) 9932 9933 /** IEM_MC[|_FLAT32|_FLAT64]_PUSH_U16/32/32_SREG/64 */ 9924 9934 DECL_INLINE_THROW(uint32_t) 9925 9935 iemNativeEmitStackPush(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarValue, 9926 uint32_t cBitsVarAndFlat, uintptr_t pfnFunction) 9927 { 9928 RT_NOREF(pReNative, off, idxVarValue, cBitsVarAndFlat, pfnFunction); 9929 AssertReleaseFailed(); 9936 uint32_t cBitsVarAndFlat, uintptr_t pfnFunction, uint8_t idxInstr) 9937 { 9938 /* 9939 * Assert sanity. 
9940 */ 9941 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarValue); 9942 #ifdef VBOX_STRICT 9943 if (RT_BYTE2(cBitsVarAndFlat) != 0) 9944 { 9945 Assert( (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT 9946 || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT 9947 || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT); 9948 Assert( pfnFunction 9949 == ( cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU16 9950 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU32 9951 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 1, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU32SReg 9952 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 16, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PushU16 9953 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PushU64 9954 : UINT64_C(0xc000b000a0009000) )); 9955 } 9956 else 9957 Assert( pfnFunction 9958 == ( cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU16 9959 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU32 9960 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 1, 0) ? (uintptr_t)iemNativeHlpStackPushU32SReg 9961 : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU64 9962 : UINT64_C(0xc000b000a0009000) )); 9963 #endif 9964 9965 #ifdef VBOX_STRICT 9966 /* 9967 * Check that the fExec flags we've got make sense. 9968 */ 9969 off = iemNativeEmitExecFlagsCheck(pReNative, off, pReNative->fExec); 9970 #endif 9971 9972 /* 9973 * To keep things simple we have to commit any pending writes first as we 9974 * may end up making calls. 9975 */ 9976 /** @todo we could postpone this till we make the call and reload the 9977 * registers after returning from the call. Not sure if that's sensible or 9978 * not, though. 
*/ 9979 off = iemNativeRegFlushPendingWrites(pReNative, off); 9980 9981 /* 9982 * Move/spill/flush stuff out of call-volatile registers, keeping whatever 9983 * idxVarValue might be occupying. 9984 * 9985 * This is the easy way out. We could contain this to the tlb-miss branch 9986 * by saving and restoring active stuff here. 9987 */ 9988 /** @todo save+restore active registers and maybe guest shadows in tlb-miss. */ 9989 off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */, RT_BIT_32(idxVarValue)); 9990 9991 /* For now, flush any shadow copy of the xSP register. */ 9992 iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTREG_GPR(X86_GREG_xSP))); 9993 9994 /* 9995 * Define labels and allocate the result register (trying for the return 9996 * register if we can). 9997 */ 9998 uint16_t const uTlbSeqNo = pReNative->uTlbSeqNo++; 9999 uint32_t const idxLabelTlbMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo); 10000 uint32_t const idxLabelTlbDone = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbDone, UINT32_MAX, uTlbSeqNo); 10001 10002 /* 10003 * First we try to go via the TLB. 10004 */ 10005 //pReNative->pInstrBuf[off++] = 0xcc; 10006 /** @todo later. */ 10007 RT_NOREF(cBitsVarAndFlat); 10008 10009 /* 10010 * Call helper to do the popping. 
10011 */ 10012 iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off); 10013 10014 #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING 10015 off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr)); 10016 #else 10017 RT_NOREF(idxInstr); 10018 #endif 10019 10020 /* IEMNATIVE_CALL_ARG1_GREG = idxVarValue (first) */ 10021 off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarValue, 10022 0 /*offAddend*/, true /*fVarAllowInVolatileReg*/); 10023 10024 /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */ 10025 off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU); 10026 10027 /* Done setting up parameters, make the call. */ 10028 off = iemNativeEmitCallImm(pReNative, off, pfnFunction); 10029 10030 iemNativeLabelDefine(pReNative, idxLabelTlbDone, off); 10031 9930 10032 return off; 9931 10033 } … … 10014 10116 /* For now, flush the any shadow copy of the guest register that is about 10015 10117 to be popped and the xSP register. */ 10016 iemNativeRegFlushGuestShadows(pReNative, RT_BIT_64(IEMNATIVEGSTREG_GPR(idxGReg)) | RT_BIT_64(X86_GREG_xSP)); 10118 iemNativeRegFlushGuestShadows(pReNative, 10119 RT_BIT_64(IEMNATIVEGSTREG_GPR(idxGReg)) | RT_BIT_64(IEMNATIVEGSTREG_GPR(X86_GREG_xSP))); 10017 10120 10018 10121 /* -
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
r102569 r102579 3204 3204 DECL_FORCE_INLINE_THROW(uint32_t) 3205 3205 iemNativeEmitLoadArgGregFromStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar, 3206 int32_t offAddend = 0 )3206 int32_t offAddend = 0, bool fVarAllowInVolatileReg = false) 3207 3207 { 3208 3208 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar); … … 3213 3213 if (idxRegVar < RT_ELEMENTS(pReNative->Core.aHstRegs)) 3214 3214 { 3215 Assert(!(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK) );3215 Assert(!(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK) || fVarAllowInVolatileReg); 3216 3216 if (!offAddend) 3217 3217 { … … 3241 3241 DECL_FORCE_INLINE_THROW(uint32_t) 3242 3242 iemNativeEmitLoadArgGregFromImmOrStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar, 3243 int32_t offAddend = 0 )3243 int32_t offAddend = 0, bool fVarAllowInVolatileReg = false) 3244 3244 { 3245 3245 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar); … … 3247 3247 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegArg, pReNative->Core.aVars[idxVar].u.uValue + offAddend); 3248 3248 else 3249 off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, idxRegArg, idxVar, offAddend );3249 off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, idxRegArg, idxVar, offAddend, fVarAllowInVolatileReg); 3250 3250 return off; 3251 3251 }
Note: See TracChangeset for help on using the changeset viewer.