- Timestamp:
- Dec 4, 2023 1:25:29 PM (14 months ago)
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r102448 r102450 2881 2881 'IEM_MC_FETCH_GREG_U32_SX_U64': (McBlock.parseMcGeneric, False, True, ), 2882 2882 'IEM_MC_FETCH_GREG_U32_ZX_U64': (McBlock.parseMcGeneric, False, True, ), 2883 'IEM_MC_FETCH_GREG_U64': (McBlock.parseMcGeneric, False, False,),2884 'IEM_MC_FETCH_GREG_U64_ZX_U64': (McBlock.parseMcGeneric, False, False,),2883 'IEM_MC_FETCH_GREG_U64': (McBlock.parseMcGeneric, False, True, ), 2884 'IEM_MC_FETCH_GREG_U64_ZX_U64': (McBlock.parseMcGeneric, False, True, ), 2885 2885 'IEM_MC_FETCH_GREG_U8': (McBlock.parseMcGeneric, False, True, ), 2886 2886 'IEM_MC_FETCH_GREG_U8_SX_U16': (McBlock.parseMcGeneric, False, True, ), … … 3134 3134 'IEM_MC_STORE_GREG_U32': (McBlock.parseMcGeneric, True, True, ), 3135 3135 'IEM_MC_STORE_GREG_U32_CONST': (McBlock.parseMcGeneric, True, True, ), 3136 'IEM_MC_STORE_GREG_U64': (McBlock.parseMcGeneric, True, False,),3137 'IEM_MC_STORE_GREG_U64_CONST': (McBlock.parseMcGeneric, True, False,),3136 'IEM_MC_STORE_GREG_U64': (McBlock.parseMcGeneric, True, True, ), 3137 'IEM_MC_STORE_GREG_U64_CONST': (McBlock.parseMcGeneric, True, True, ), 3138 3138 'IEM_MC_STORE_GREG_U8': (McBlock.parseMcGeneric, True, False, ), 3139 3139 'IEM_MC_STORE_GREG_U8_CONST': (McBlock.parseMcGeneric, True, False, ), -
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r102447 → r102450)

@@ -7479,16 +7479,44 @@
 
     /*
-     * We can either just load the low 16-bit of the GPR into a host register
+     * We can either just load the low 32-bit of the GPR into a host register
      * for the variable, or we can do so via a shadow copy host register. The
      * latter will avoid having to reload it if it's being stored later, but
[lines 7484–7490 unchanged, elided in the original changeset view]
     uint8_t const idxVarReg = iemNativeVarAllocRegister(pReNative, idxDstVar, &off);
     off = iemNativeEmitLoadGprSignExtendedFromGpr32(pReNative, off, idxVarReg, idxGstFullReg);
+
+    iemNativeRegFreeTmp(pReNative, idxGstFullReg);
+    return off;
+}
+
+
+#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) \
+    off = iemNativeEmitFetchGregU64(pReNative, off, a_u64Dst, a_iGReg)
+
+#define IEM_MC_FETCH_GREG_U64_ZX_U64(a_u64Dst, a_iGReg) \
+    off = iemNativeEmitFetchGregU64(pReNative, off, a_u64Dst, a_iGReg)
+
+/** Emits code for IEM_MC_FETCH_GREG_U64 (and the
+ *  IEM_MC_FETCH_GREG_U64_ZX_U64 alias). */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitFetchGregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDstVar, uint8_t iGReg)
+{
+    Assert(idxDstVar < RT_ELEMENTS(pReNative->Core.aVars) && (pReNative->Core.bmVars & RT_BIT_32(idxDstVar)));
+    Assert(pReNative->Core.aVars[idxDstVar].cbVar == sizeof(uint64_t));
+    Assert(iGReg < 16);
+
+    uint8_t const idxGstFullReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
+                                                                  kIemNativeGstRegUse_ReadOnly);
+
+    iemNativeVarSetKindToStack(pReNative, idxDstVar);
+    uint8_t const idxVarReg = iemNativeVarAllocRegister(pReNative, idxDstVar, &off);
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxVarReg, idxGstFullReg);
+    /** @todo name the register a shadow one already? */
 
     iemNativeRegFreeTmp(pReNative, idxGstFullReg);
[lines 7495–7827 / 7523–7855 unchanged, elided in the original changeset view]
@@ -7828,4 +7856,52 @@
     }
 
+
+#define IEM_MC_STORE_GREG_U64_CONST(a_iGReg, a_u64Const) \
+    off = iemNativeEmitStoreGregU64Const(pReNative, off, a_iGReg, a_u64Const)
+
+/** Emits code for IEM_MC_STORE_GREG_U64_CONST. */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitStoreGregU64Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint64_t uValue)
+{
+    Assert(iGReg < 16);
+    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(iGReg),
+                                                                 kIemNativeGstRegUse_ForFullWrite);
+    off = iemNativeEmitLoadGprImm64(pReNative, off, idxGstTmpReg, uValue);
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
+    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
+    return off;
+}
+
+
+#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) \
+    off = iemNativeEmitStoreGregU64(pReNative, off, a_iGReg, a_u64Value)
+
+/** Emits code for IEM_MC_STORE_GREG_U64. */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitStoreGregU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
+{
+    Assert(iGReg < 16);
+    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);
+
+    /*
+     * If it's a constant value (unlikely) we treat this as a
+     * IEM_MC_STORE_GREG_U64_CONST statement.
+     */
+    if (pReNative->Core.aVars[idxValueVar].enmKind == kIemNativeVarKind_Stack)
+    { /* likely */ }
+    else
+    {
+        AssertStmt(pReNative->Core.aVars[idxValueVar].enmKind != kIemNativeVarKind_Immediate,
+                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
+        return iemNativeEmitStoreGregU64Const(pReNative, off, iGReg, pReNative->Core.aVars[idxValueVar].u.uValue);
+    }
+
+    /*
+     * For the rest we allocate a guest register for the variable and writes
+     * it to the CPUMCTX structure.
+     */
+    uint8_t const idxVarReg = iemNativeVarAllocRegisterForGuestReg(pReNative, idxValueVar, IEMNATIVEGSTREG_GPR(iGReg), &off);
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxVarReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
+    return off;
+}
 
 
Note: See TracChangeset for help on using the changeset viewer.