Timestamp:                   Nov 10, 2023 4:14:51 PM
svn:sync-xref-src-repo-rev:  160174
Location:                    trunk/src/VBox/VMM
Files:                       3 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py (r102012)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py (r102065)
@@ -3061 +3061 @@
     'IEM_MC_STORE_FPUREG_R80_SRC_REF':   (McBlock.parseMcGeneric, True,  False, ),
     'IEM_MC_STORE_GREG_I64':             (McBlock.parseMcGeneric, True,  False, ),
-    'IEM_MC_STORE_GREG_U16':             (McBlock.parseMcGeneric, True,  False, ),
+    'IEM_MC_STORE_GREG_U16':             (McBlock.parseMcGeneric, True,  True,  ),
     'IEM_MC_STORE_GREG_U16_CONST':       (McBlock.parseMcGeneric, True,  False, ),
     'IEM_MC_STORE_GREG_U32':             (McBlock.parseMcGeneric, True,  False, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
--- trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r102022)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r102065)
@@ -140 +140 @@
 static uint32_t iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                 uint8_t idxReg, IEMNATIVEGSTREG enmGstReg);
+static void     iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative);
 #endif
 #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
@@ -145 +146 @@
 static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData);
 #endif
+DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, uint32_t off);
 
 
@@ -2259 +2261 @@
 {
     Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
-    Assert(!(fRegMask & ~IEMNATIVE_REG_FIXED_MASK));
+    Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));
 
     /*
@@ -2311 +2313 @@
     if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
     {
-        AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX,
+        AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS,
                    IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_8));
-        *poff = iemNativeEmitStoreGprByBp(pReNative, *poff,
-                                          pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t)
-                                          - IEMNATIVE_FP_OFF_STACK_VARS,
-                                          idxReg);
+        *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeVarCalcBpDisp(pReNative, idxVar), idxReg);
     }
 
@@ -2379 +2378 @@
     fRegs &= ~pReNative->Core.bmHstRegsWithGstShadow;
     unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
+    iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);
 
     uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
+    Log12(("iemNativeRegMoveOrSpillStackVar: moving idxVar=%d from %s to %s (fGstRegShadows=%RX64)\n",
+           idxVar, g_apszIemNativeHstRegNames[idxRegOld], g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
     pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
     pReNative->Core.aHstRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
@@ -2405 +2407 @@
     else
     {
-        AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
-        off = iemNativeEmitStoreGprByBp(pReNative, off,
-                                        pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t)
-                                        - IEMNATIVE_FP_OFF_STACK_VARS,
-                                        idxRegOld);
+        uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
+        Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%d/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
+               idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
+        AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
+        off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
 
         pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
@@ -2963 +2965 @@
     { /* likely */ }
     else
+    {
+        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: fRegsToMove=%#x\n", fRegsToMove));
         while (fRegsToMove != 0)
         {
@@ -2976 +2980 @@
             Assert(pReNative->Core.bmVars & RT_BIT_32(idxVar));
             Assert(pReNative->Core.aVars[idxVar].idxReg == idxReg);
+            Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%d enmKind=%d idxReg=%d\n",
+                   idxVar, pReNative->Core.aVars[idxVar].enmKind, pReNative->Core.aVars[idxVar].idxReg));
             if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack)
                 pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
@@ -3002 +3008 @@
                 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
         }
+    }
 
     /*
      * Do the actual freeing.
      */
+    if (pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
+        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n", pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK));
     pReNative->Core.bmHstRegs &= ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;
 
@@ -3013 +3022 @@
     if (fHstRegsWithGstShadow)
     {
+        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegsWithGstShadow %#RX32 -> %#RX32; removed %#RX32\n",
+               pReNative->Core.bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK, fHstRegsWithGstShadow));
         pReNative->Core.bmHstRegsWithGstShadow &= ~fHstRegsWithGstShadow;
         do
         {
             unsigned const idxReg = ASMBitFirstSetU32(fHstRegsWithGstShadow) - 1;
-            fHstRegsWithGstShadow = ~RT_BIT_32(idxReg);
-
-            Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
+            fHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
+
+            AssertMsg(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0, ("idxReg=%#x\n", idxReg));
             pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
             pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
@@ -3046 +3057 @@
     if (fGstRegs)
     {
+        Log12(("iemNativeRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n",
+               fGstRegs, pReNative->Core.bmGstRegShadows, pReNative->Core.bmGstRegShadows & ~fGstRegs));
         pReNative->Core.bmGstRegShadows &= ~fGstRegs;
         if (pReNative->Core.bmGstRegShadows)
@@ -3105 +3118 @@
 
 
+#ifdef VBOX_STRICT
+/**
+ * Does internal register allocator sanity checks.
+ */
+static void iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative)
+{
+    /*
+     * Iterate host registers building a guest shadowing set.
+     */
+    uint64_t bmGstRegShadows        = 0;
+    uint32_t bmHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow;
+    AssertMsg(!(bmHstRegsWithGstShadow & IEMNATIVE_REG_FIXED_MASK), ("%#RX32\n", bmHstRegsWithGstShadow));
+    while (bmHstRegsWithGstShadow)
+    {
+        unsigned const idxHstReg = ASMBitFirstSetU32(bmHstRegsWithGstShadow) - 1;
+        Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
+        bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
+
+        uint64_t fThisGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
+        AssertMsg(fThisGstRegShadows != 0, ("idxHstReg=%d\n", idxHstReg));
+        bmGstRegShadows |= fThisGstRegShadows;
+        while (fThisGstRegShadows)
+        {
+            unsigned const idxGstReg = ASMBitFirstSetU64(fThisGstRegShadows) - 1;
+            fThisGstRegShadows &= ~RT_BIT_64(idxGstReg);
+            AssertMsg(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg,
+                      ("idxHstReg=%d aidxGstRegShadows[idxGstReg=%d]=%d\n",
+                       idxHstReg, idxGstReg, pReNative->Core.aidxGstRegShadows[idxGstReg]));
+        }
+    }
+    AssertMsg(bmGstRegShadows == pReNative->Core.bmGstRegShadows,
+              ("%RX64 vs %RX64; diff %RX64\n", bmGstRegShadows, pReNative->Core.bmGstRegShadows,
+               bmGstRegShadows ^ pReNative->Core.bmGstRegShadows));
+
+    /*
+     * Now the other way around, checking the guest to host index array.
+     */
+    bmHstRegsWithGstShadow = 0;
+    bmGstRegShadows        = pReNative->Core.bmGstRegShadows;
+    while (bmGstRegShadows)
+    {
+        unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadows) - 1;
+        Assert(idxGstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
+        bmGstRegShadows &= ~RT_BIT_64(idxGstReg);
+
+        uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
+        AssertMsg(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs), ("aidxGstRegShadows[%d]=%d\n", idxGstReg, idxHstReg));
+        AssertMsg(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg),
+                  ("idxGstReg=%d idxHstReg=%d fGstRegShadows=%RX64\n",
+                   idxGstReg, idxHstReg, pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
+        bmHstRegsWithGstShadow |= RT_BIT_32(idxHstReg);
+    }
+    AssertMsg(bmHstRegsWithGstShadow == pReNative->Core.bmHstRegsWithGstShadow,
+              ("%RX64 vs %RX64; diff %RX64\n", bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow,
+               bmHstRegsWithGstShadow ^ pReNative->Core.bmHstRegsWithGstShadow));
+}
+#endif
+
+
 /*********************************************************************************************************************************
 *   Code Emitters (larger snippets)                                                                                              *
@@ -3573 +3645 @@
     off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9,  pCallEntry->auParams[1]);
     if (cParams > 2)
+    {
         off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x10, pCallEntry->auParams[2]);
-    off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
+        off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
+    }
     off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
 # endif /* VBOXSTRICTRC_STRICT_ENABLED */
@@ -3931 +4005 @@
 #define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
     { \
+        Assert(pReNative->Core.bmVars     == 0); \
+        Assert(pReNative->Core.u64ArgVars == UINT64_MAX); \
+        Assert(pReNative->Core.bmStack    == 0); \
         pReNative->fMc    = (a_fMcFlags); \
         pReNative->fCImpl = (a_fCImplFlags); \
@@ -3938 +4015 @@
  *       generate code for all the IEM_MC_IF_XXX branches. */
 #define IEM_MC_END() \
+        iemNativeVarFreeAll(pReNative); \
     } return off
 
@@ -4399 +4477 @@
             || idxHstReg != pOther->aidxGstRegShadows[idxGstReg])
         {
-            Log12(("iemNativeEmitEndIf: dropping gst %#RX64from hst %s\n",
+            Log12(("iemNativeEmitEndIf: dropping gst %s from hst %s\n",
                    g_aGstShadowInfo[idxGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
             iemNativeRegClearGstRegShadowing(pReNative, idxHstReg, off);
@@ -5008 +5086 @@
         pReNative->Core.bmStack |= RT_BIT_32(iSlot);
         pReNative->Core.aVars[idxVar].idxStackSlot = iSlot;
+        Log11(("iemNativeVarSetKindToStack: idxVar=%d iSlot=%#x\n", idxVar, iSlot));
         return;
     }
@@ -5026 +5105 @@
         pReNative->Core.bmStack |= (fBitAllocMask << iSlot);
         pReNative->Core.aVars[idxVar].idxStackSlot = iSlot;
+        Log11(("iemNativeVarSetKindToStack: idxVar=%d iSlot=%#x/%#x (cbVar=%#x)\n",
+               idxVar, iSlot, fBitAllocMask, pReNative->Core.aVars[idxVar].cbVar));
         return;
     }
@@ -5201 +5282 @@
 }
 
+
+/**
+ * Worker that frees the stack slots for variable @a idxVar if any allocated.
+ *
+ * This is used both by iemNativeVarFreeOneWorker and iemNativeEmitCallCommon.
+ */
+DECL_FORCE_INLINE(void) iemNativeVarFreeStackSlots(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
+{
+    uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
+    Assert(idxStackSlot == UINT8_MAX || idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
+    if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
+    {
+        uint8_t const  cbVar      = pReNative->Core.aVars[idxVar].cbVar;
+        uint8_t const  cSlots     = (cbVar + sizeof(uint64_t) - 1) / sizeof(uint64_t);
+        uint32_t const fAllocMask = (uint32_t)(RT_BIT_32(cSlots) - 1U);
+        Assert(cSlots > 0);
+        Assert(((pReNative->Core.bmStack >> idxStackSlot) & fAllocMask) == fAllocMask);
+        Log11(("iemNativeVarFreeStackSlots: idxVar=%d iSlot=%#x/%#x (cbVar=%#x)\n", idxVar, idxStackSlot, fAllocMask, cbVar));
+        pReNative->Core.bmStack &= ~(fAllocMask << idxStackSlot);
+        pReNative->Core.aVars[idxVar].idxStackSlot = UINT8_MAX;
+    }
+}
+
+
+/**
+ * Worker that frees a single variable.
+ *
+ * ASSUMES that @a idxVar is valid.
+ */
+DECLINLINE(void) iemNativeVarFreeOneWorker(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
+{
+    Assert(   pReNative->Core.aVars[idxVar].enmKind > kIemNativeVarKind_Invalid
+           && pReNative->Core.aVars[idxVar].enmKind < kIemNativeVarKind_End);
+
+    /* Free the host register first if any assigned. */
+    uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
+    if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
+    {
+        Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == idxVar);
+        pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
+        pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
+    }
+
+    /* Free argument mapping. */
+    uint8_t const uArgNo = pReNative->Core.aVars[idxVar].uArgNo;
+    if (idxHstReg < RT_ELEMENTS(pReNative->Core.aidxArgVars))
+        pReNative->Core.aidxArgVars[uArgNo] = UINT8_MAX;
+
+    /* Free the stack slots. */
+    iemNativeVarFreeStackSlots(pReNative, idxVar);
+
+    /* Free the actual variable. */
+    pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_Invalid;
+    pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
+}
+
+
+/**
+ * Worker for iemNativeVarFreeAll that's called when there is anything to do.
+ */
+DECLINLINE(void) iemNativeVarFreeAllSlow(PIEMRECOMPILERSTATE pReNative, uint32_t bmVars)
+{
+    while (bmVars != 0)
+    {
+        uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
+        bmVars &= ~RT_BIT_32(idxVar);
+
+#if 1 /** @todo optimize by simplifying this later... */
+        iemNativeVarFreeOneWorker(pReNative, idxVar);
+#else
+        /* Only need to free the host register, the rest is done as bulk updates below. */
+        uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
+        if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
+        {
+            Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == idxVar);
+            pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
+            pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
+        }
+#endif
+    }
+#if 0 /** @todo optimize by simplifying this later... */
+    pReNative->Core.bmVars     = 0;
+    pReNative->Core.bmStack    = 0;
+    pReNative->Core.u64ArgVars = UINT64_MAX;
+#endif
+}
+
+
+/**
+ * This is called by IEM_MC_END() to clean up all variables.
+ */
+DECL_FORCE_INLINE(void) iemNativeVarFreeAll(PIEMRECOMPILERSTATE pReNative)
+{
+    uint32_t const bmVars = pReNative->Core.bmVars;
+    if (bmVars != 0)
+        iemNativeVarFreeAllSlow(pReNative, bmVars);
+    Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
+    Assert(pReNative->Core.bmStack    == 0);
+}
 
 
@@ -5396 +5576 @@
                 AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX,
                            IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
-                off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg,
-                                               IEMNATIVE_FP_OFF_STACK_VARS
-                                               + pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t));
+                off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg, iemNativeVarCalcBpDisp(pReNative, idxVar));
                 continue;
 
@@ -5412 +5590 @@
                            IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
                 off = iemNativeEmitLeaGprByBp(pReNative, off, idxArgReg,
-                                              IEMNATIVE_FP_OFF_STACK_VARS
-                                              + pReNative->Core.aVars[idxOtherVar].idxStackSlot * sizeof(uint64_t));
+                                              iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxOtherVar].idxStackSlot));
                 continue;
             }
@@ -5468 +5645 @@
                            IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
                 off = iemNativeEmitLoadGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG /* is free */,
-                                               IEMNATIVE_FP_OFF_STACK_VARS
-                                               + pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t));
+                                               iemNativeVarCalcBpDisp(pReNative, idxVar));
                 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
                 continue;
@@ -5484 +5660 @@
                            IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
                 off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG,
-                                              IEMNATIVE_FP_OFF_STACK_VARS
-                                              + pReNative->Core.aVars[idxOtherVar].idxStackSlot * sizeof(uint64_t));
+                                              iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxOtherVar].idxStackSlot));
                 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
                 continue;
@@ -5516 +5691 @@
     /** @todo There is a special with IEM_MC_MEM_MAP_U16_RW and friends requiring
      *        a IEM_MC_MEM_COMMIT_AND_UNMAP_RW after a AIMPL call typically with
-     *        an argument value. */
+     *        an argument value. There is also some FPU stuff. */
     for (uint32_t i = cHiddenArgs; i < cArgs; i++)
     {
-        uint8_t idxVar = pReNative->Core.aidxArgVars[i];
+        uint8_t const idxVar = pReNative->Core.aidxArgVars[i];
         Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars));
+
+        /* no need to free registers: */
+        AssertMsg(i < IEMNATIVE_CALL_ARG_GREG_COUNT
+                  ?    pReNative->Core.aVars[idxVar].idxReg == g_aidxIemNativeCallRegs[i]
+                    || pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX
+                  :    pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX,
+                  ("i=%d idxVar=%d idxReg=%d, expected %d\n", i, idxVar, pReNative->Core.aVars[idxVar].idxReg,
+                   i < IEMNATIVE_CALL_ARG_GREG_COUNT ? g_aidxIemNativeCallRegs[i] : UINT8_MAX));
+
         pReNative->Core.aidxArgVars[i] = UINT8_MAX;
         pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
+        iemNativeVarFreeStackSlots(pReNative, idxVar);
     }
     Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
@@ -5827 +6012 @@
 
 
-#if 0
 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) \
-    off = iemNativeEmitStoreGregU16Const(pReNative, off, a_iGReg, a_u16Value)
+    off = iemNativeEmitStoreGregU16(pReNative, off, a_iGReg, a_u16Value)
+
+/** Emits code for IEM_MC_STORE_GREG_U16. */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitStoreGregU16Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint16_t uValue)
+{
+    pReNative->pInstrBuf[off++] = 0xcc;
+    Assert(iGReg < 16);
+    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
+                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg),
+                                                                 kIemNativeGstRegUse_ForUpdate);
+#ifdef RT_ARCH_AMD64
+    /* mov reg16, imm16 */
+    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
+    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
+    if (idxGstTmpReg >= 8)
+        pbCodeBuf[off++] = X86_OP_REX_B;
+    pbCodeBuf[off++] = 0xb8 + (idxGstTmpReg & 7);
+    pbCodeBuf[off++] = RT_BYTE1(uValue);
+    pbCodeBuf[off++] = RT_BYTE2(uValue);
+
+#elif defined(RT_ARCH_ARM64)
+    /* movk xdst, #uValue, lsl #0 */
+    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrMovK(idxGstTmpReg, uValue);
+
+#else
+# error "Port me!"
+#endif
+
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg & 15]));
+    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
+    return off;
+}
+
 
 /** Emits code for IEM_MC_STORE_GREG_U16. */
@@ -5835 +6055 @@
 iemNativeEmitStoreGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
 {
-    Assert(iGReg < 16)
+    Assert(iGReg < 16);
+    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);
+
+    /*
+     * If it's a constant value (unlikely) we treat this as a
+     * IEM_MC_STORE_GREG_U16_CONST statement.
+     */
+    if (pReNative->Core.aVars[idxValueVar].enmKind == kIemNativeVarKind_Stack)
+    { /* likely */ }
+    else
+    {
+        AssertStmt(pReNative->Core.aVars[idxValueVar].enmKind != kIemNativeVarKind_Immediate,
+                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
+        return iemNativeEmitStoreGregU16Const(pReNative, off, iGReg, (uint16_t)pReNative->Core.aVars[idxValueVar].u.uValue);
+    }
+
     uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                                  (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg),
                                                                  kIemNativeGstRegUse_ForUpdate);
 
-
 #ifdef RT_ARCH_AMD64
+    /* mov reg16, reg16 or [mem16] */
     uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 12);
-
-    /* To the lowest byte of the register: mov r8, imm8 */
-    if (iGRegEx < 16)
-    {
+    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
+    if (pReNative->Core.aVars[idxValueVar].idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
+    {
+        if (idxGstTmpReg >= 8 || pReNative->Core.aVars[idxValueVar].idxReg >= 8)
+            pbCodeBuf[off++] = (idxGstTmpReg >= 8 ? X86_OP_REX_R : 0)
+                             | (pReNative->Core.aVars[idxValueVar].idxReg >= 8 ? X86_OP_REX_B : 0);
+        pbCodeBuf[off++] = 0x8b;
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg & 7, pReNative->Core.aVars[idxValueVar].idxReg & 7);
+    }
+    else
+    {
+        AssertStmt(pReNative->Core.aVars[idxValueVar].idxStackSlot != UINT8_MAX,
+                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
         if (idxGstTmpReg >= 8)
-            pbCodeBuf[off++] = X86_OP_REX_B;
-        else if (idxGstTmpReg >= 4)
-            pbCodeBuf[off++] = X86_OP_REX;
-        pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
-        pbCodeBuf[off++] = u8Value;
-    }
-    /* Otherwise it's to ah, ch, dh or bh: use mov r8, imm8 if we can, otherwise, we rotate. */
-    else if (idxGstTmpReg < 4)
-    {
-        pbCodeBuf[off++] = 0xb4 + idxGstTmpReg;
-        pbCodeBuf[off++] = u8Value;
-    }
-    else
-    {
-        /* ror reg64, 8 */
-        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
-        pbCodeBuf[off++] = 0xc1;
-        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
-        pbCodeBuf[off++] = 8;
-
-        /* mov reg8, imm8 */
-        if (idxGstTmpReg >= 8)
-            pbCodeBuf[off++] = X86_OP_REX_B;
-        else if (idxGstTmpReg >= 4)
-            pbCodeBuf[off++] = X86_OP_REX;
-        pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
-        pbCodeBuf[off++] = u8Value;
-
-        /* rol reg64, 8 */
-        pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
-        pbCodeBuf[off++] = 0xc1;
-        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
-        pbCodeBuf[off++] = 8;
+            pbCodeBuf[off++] = X86_OP_REX_R;
+        pbCodeBuf[off++] = 0x8b;
+        off = iemNativeEmitGprByBpDisp(pbCodeBuf, off, idxGstTmpReg, iemNativeVarCalcBpDisp(pReNative, idxValueVar), pReNative);
     }
 
 #elif defined(RT_ARCH_ARM64)
-    uint8_t const    idxImmReg   = iemNativeRegAllocTmpImm(pReNative, &off, u8Value);
-    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
-    if (iGRegEx < 16)
-        /* bfi w1, w2, 0, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 7:0. */
-        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 0, 8);
-    else
-        /* bfi w1, w2, 8, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 15:8. */
-        pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 8, 8);
-    iemNativeRegFreeTmp(pReNative, idxImmReg);
+    /* bfi w1, w2, 0, 16 - moves bits 15:0 from idxVarReg to idxGstTmpReg bits 15:0. */
+    uint8_t const    idxVarReg   = iemNativeVarAllocRegister(pReNative, idxDstVar, &off);
+    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxVarReg, 0, 16);
 
@@ -5900 +6109 @@
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGRegEx & 15]));
-
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg & 15]));
     iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
     return off;
 }
-#endif
 
 
@@ -6800 +7007 @@
         iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL);
 #endif
-#if defined(VBOX_STRICT) && 1
+#if defined(VBOX_STRICT)
         off = iemNativeEmitMarker(pReNative, off,
                                   RT_MAKE_U32((pTb->Thrd.cCalls - cCallsLeft - 1) | (pfnRecom ? 0x8000 : 0),
                                               pCallEntry->enmFunction));
 #endif
+#if defined(VBOX_STRICT)
+        iemNativeRegAssertSanity(pReNative);
+#endif
 
         /*
         * Actual work.
         */
+        Log2(("%u[%u]: %s%s\n", pTb->Thrd.cCalls - cCallsLeft - 1, pCallEntry->idxInstr,
+              g_apszIemThreadedFunctions[pCallEntry->enmFunction], pfnRecom ? "" : "(todo)"));
        if (pfnRecom) /** @todo stats on this. */
        {
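
The new iemNativeRegAssertSanity checker cross-validates three pieces of register-allocator state: the per-host-register shadow masks (aHstRegs[].fGstRegShadows), the guest-to-host index array (aidxGstRegShadows[]), and the summary bitmaps (bmHstRegsWithGstShadow, bmGstRegShadows). The following is a minimal standalone sketch of that same two-pass invariant, written against a simplified stand-in structure; the type and function names (MYREGSTATE, myRegAssertSanity) are invented for illustration and are not the VirtualBox types or API.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-in for the recompiler core state. */
    #define MY_HST_REG_COUNT 16
    #define MY_GST_REG_COUNT 64

    typedef struct MYREGSTATE
    {
        uint64_t bmGstRegShadows;                       /* summary: guest regs shadowed anywhere */
        uint32_t bmHstRegsWithGstShadow;                /* summary: host regs that shadow something */
        uint64_t afGstRegShadows[MY_HST_REG_COUNT];     /* per host reg: guest regs it shadows */
        uint8_t  aidxGstRegShadows[MY_GST_REG_COUNT];   /* per guest reg: host reg shadowing it */
    } MYREGSTATE;

    /* Two-pass consistency check in the spirit of iemNativeRegAssertSanity. */
    static void myRegAssertSanity(const MYREGSTATE *pState)
    {
        /* Pass 1: walk flagged host registers, rebuild the guest summary, check the index array. */
        uint64_t bmGstSeen = 0;
        uint32_t bmHst     = pState->bmHstRegsWithGstShadow;
        while (bmHst)
        {
            unsigned const idxHst = (unsigned)__builtin_ctz(bmHst);
            bmHst &= bmHst - 1;

            uint64_t fShadows = pState->afGstRegShadows[idxHst];
            assert(fShadows != 0);                      /* a flagged host reg must shadow something */
            bmGstSeen |= fShadows;
            while (fShadows)
            {
                unsigned const idxGst = (unsigned)__builtin_ctzll(fShadows);
                fShadows &= fShadows - 1;
                assert(pState->aidxGstRegShadows[idxGst] == idxHst);  /* index array must agree */
            }
        }
        assert(bmGstSeen == pState->bmGstRegShadows);   /* rebuilt summary matches the cached one */

        /* Pass 2: walk shadowed guest registers and check the reverse mapping. */
        uint64_t bmGst = pState->bmGstRegShadows;
        while (bmGst)
        {
            unsigned const idxGst = (unsigned)__builtin_ctzll(bmGst);
            bmGst &= bmGst - 1;
            uint8_t const idxHst = pState->aidxGstRegShadows[idxGst];
            assert(idxHst < MY_HST_REG_COUNT);
            assert(pState->afGstRegShadows[idxHst] & (UINT64_C(1) << idxGst));
        }
    }

    int main(void)
    {
        MYREGSTATE State = {0};
        /* Host register 3 shadows guest registers 0 and 5. */
        State.afGstRegShadows[3]     = (UINT64_C(1) << 0) | (UINT64_C(1) << 5);
        State.aidxGstRegShadows[0]   = 3;
        State.aidxGstRegShadows[5]   = 3;
        State.bmHstRegsWithGstShadow = UINT32_C(1) << 3;
        State.bmGstRegShadows        = State.afGstRegShadows[3];
        myRegAssertSanity(&State);
        printf("shadow bookkeeping is consistent\n");
        return 0;
    }

Running such a check after every recompiled call entry (as the changeset does under VBOX_STRICT) catches the kind of bookkeeping bug also fixed here, where a shadow bitmap was assigned instead of masked (fHstRegsWithGstShadow = ~RT_BIT_32(idxReg) vs &= ~RT_BIT_32(idxReg)).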
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
--- trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r102022)
+++ trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r102065)
@@ -804 +804 @@
 /**
  * Checks that we didn't exceed the space requested in the last
- * iemNativeInstrBufEnsure() call. */
+ * iemNativeInstrBufEnsure() call.
+ */
 #define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
     AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
               ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
 
+/**
+ * Checks that a variable index is valid.
+ */
+#define IEMNATIVE_ASSERT_VAR_IDX(a_pReNative, a_idxVar) \
+    AssertMsg(   (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
+              && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar)), ("%s=%d\n", #a_idxVar, a_idxVar))
+
+/**
+ * Calculates the stack address of a variable as a [r]BP displacement value.
+ */
+DECL_FORCE_INLINE(int32_t)
+iemNativeStackCalcBpDisp(uint8_t idxStackSlot)
+{
+    Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
+    return idxStackSlot * sizeof(uint64_t) + IEMNATIVE_FP_OFF_STACK_VARS;
+}
+
+/**
+ * Calculates the stack address of a variable as a [r]BP displacement value.
+ */
+DECL_FORCE_INLINE(int32_t)
+iemNativeVarCalcBpDisp(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
+{
+    return iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxVar].idxStackSlot);
+}
+
 /** @} */
 
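
The two new inline helpers centralize the slot-index to [r]BP displacement arithmetic (slot * sizeof(uint64_t) + IEMNATIVE_FP_OFF_STACK_VARS) that several emitters in IEMAllN8veRecompiler.cpp previously open-coded. Below is a minimal standalone sketch of the same calculation; the constants MY_FRAME_VAR_SLOTS and MY_FP_OFF_STACK_VARS are made-up placeholders, since the real IEMNATIVE_FRAME_VAR_SLOTS and IEMNATIVE_FP_OFF_STACK_VARS values are defined elsewhere in IEMN8veRecompiler.h.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder stand-ins for the real frame-layout constants. */
    #define MY_FRAME_VAR_SLOTS    16           /* number of 8-byte spill slots in the frame      */
    #define MY_FP_OFF_STACK_VARS  (-16 * 8)    /* placeholder rBP-relative start of those slots  */

    /* Slot index -> rBP displacement, mirroring iemNativeStackCalcBpDisp(). */
    static int32_t myStackCalcBpDisp(uint8_t idxStackSlot)
    {
        assert(idxStackSlot < MY_FRAME_VAR_SLOTS);
        return (int32_t)(idxStackSlot * sizeof(uint64_t)) + MY_FP_OFF_STACK_VARS;
    }

    int main(void)
    {
        /* Slot 0 sits at the start of the variable area; slot 3 is 24 bytes above it. */
        printf("slot 0 -> rBP%+d\n", (int)myStackCalcBpDisp(0));   /* rBP-128 with these placeholders */
        printf("slot 3 -> rBP%+d\n", (int)myStackCalcBpDisp(3));   /* rBP-104 with these placeholders */
        return 0;
    }

The second helper, iemNativeVarCalcBpDisp(), simply looks up the variable's idxStackSlot in the recompiler state and forwards to the slot-based calculation, which is why the emitters in this changeset can replace their hand-written offset expressions with a single call.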