Changeset 102370 in vbox
- Timestamp: Nov 28, 2023 10:17:45 PM
- Location: trunk
- Files: 4 edited
trunk/include/iprt/armv8.h (r102081 → r102370)

@@ -3002,6 +3002,6 @@
                                          uint32_t cImm6Ror, uint32_t uImm6S, bool f64Bit, uint32_t uN1)
 {
-    Assert(cImm6Ror < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f)));  Assert(iRegResult < 32); Assert(u2Opc < 4);
-    Assert(uImm6S < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f)));    Assert(iRegSrc < 32);    Assert(uN1 <= (unsigned)f64Bit);
+    Assert(cImm6Ror <= (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegResult < 32); Assert(u2Opc < 4);
+    Assert(uImm6S <= (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f)));   Assert(iRegSrc < 32);    Assert(uN1 <= (unsigned)f64Bit);
     return ((uint32_t)f64Bit << 31)
          | (u2Opc << 29)
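The fix here is small but real: the fields being asserted are 6 bits wide, so 0x3f (for example, a rotate by 63 in 64-bit mode) is a legal value, and the old strict '<' rejected it. Below is a minimal standalone sketch of the corrected inclusive bound; MkImm6Pair is a hypothetical stand-in for the real encoder, not VBox code:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical encoder for a 6-bit immr/imms pair, mirroring the assertion
       pattern in the patch: the fields are 6 bits wide, so 0x3f (rotate by 63
       in 64-bit mode) is a legal maximum and must pass the check. */
    static uint32_t MkImm6Pair(uint32_t cImm6Ror, uint32_t uImm6S, bool f64Bit)
    {
        uint32_t const uMax = f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f);
        assert(cImm6Ror <= uMax); /* '<' would wrongly reject cImm6Ror == uMax */
        assert(uImm6S   <= uMax);
        return (cImm6Ror << 16) | (uImm6S << 10); /* immr at bits 21:16, imms at 15:10 */
    }

    int main(void)
    {
        (void)MkImm6Pair(UINT32_C(0x3f), UINT32_C(0x3f), true /*f64Bit*/);
        return 0;
    }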
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r102368 → r102370)

@@ -2450 +2450 @@
 
 
+/**
+ * Reassigns a variable to a different register specified by the caller.
+ *
+ * @returns The new code buffer position.
+ * @param   pReNative   The native recompile state.
+ * @param   off         The current code buffer position.
+ * @param   idxVar      The variable index.
+ * @param   idxRegOld   The old host register number.
+ * @param   idxRegNew   The new host register number.
+ * @param   pszCaller   The caller for logging.
+ */
+static uint32_t iemNativeRegMoveVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar,
+                                    uint8_t idxRegOld, uint8_t idxRegNew, const char *pszCaller)
+{
+    Assert(pReNative->Core.aVars[idxVar].idxReg == idxRegOld);
+    RT_NOREF(pszCaller);
+
+    iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);
+
+    uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
+    Log12(("%s: moving idxVar=%d from %s to %s (fGstRegShadows=%RX64)\n",
+           pszCaller, idxVar, g_apszIemNativeHstRegNames[idxRegOld],
+           g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxRegOld);
+
+    pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
+    pReNative->Core.aHstRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
+    pReNative->Core.aHstRegs[idxRegNew].idxVar         = idxVar;
+    if (fGstRegShadows)
+    {
+        pReNative->Core.bmHstRegsWithGstShadow |= RT_BIT_32(idxRegNew);
+        while (fGstRegShadows)
+        {
+            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
+            fGstRegShadows &= ~RT_BIT_64(idxGstReg);
+
+            Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxRegOld);
+            pReNative->Core.aidxGstRegShadows[idxGstReg] = idxRegNew;
+        }
+    }
+
+    pReNative->Core.aVars[idxVar].idxReg = (uint8_t)idxRegNew;
+    pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
+    pReNative->Core.bmHstRegs = RT_BIT_32(idxRegNew) | (pReNative->Core.bmHstRegs & ~RT_BIT_32(idxRegOld));
+    return off;
+}
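The heart of the new helper is the loop that walks the guest-register shadow bitmap one set bit at a time and repoints each shadow entry at the new host register. Below is a standalone sketch of that bit-scan idiom, assuming the GCC/Clang __builtin_ctzll builtin; BitFirstSetU64 and aidxShadows are hypothetical stand-ins for ASMBitFirstSetU64 and the recompiler's shadow map:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for ASMBitFirstSetU64: returns the 1-based index of the least
       significant set bit, or 0 if no bit is set. */
    static unsigned BitFirstSetU64(uint64_t fBits)
    {
        return fBits ? (unsigned)__builtin_ctzll(fBits) + 1 : 0;
    }

    int main(void)
    {
        uint8_t  aidxShadows[64] = {0};  /* hypothetical guest-reg -> host-reg map */
        uint64_t fGstRegShadows  = UINT64_C(0x8200000001); /* guest regs 0, 33, 39 */
        uint8_t  const idxRegNew = 7;

        /* Clear the lowest set bit each round until the mask is exhausted. */
        while (fGstRegShadows)
        {
            unsigned const idxGstReg = BitFirstSetU64(fGstRegShadows) - 1;
            fGstRegShadows &= ~(UINT64_C(1) << idxGstReg);
            aidxShadows[idxGstReg] = idxRegNew;
            printf("guest register %u now shadowed by host register %u\n",
                   idxGstReg, (unsigned)idxRegNew);
        }
        return 0;
    }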
@@ -2452 +2499 @@
  * Moves a variable to a different register or spills it onto the stack.
  *
 ...
  * kinds can easily be recreated if needed later.
  *
- * @returns The new code buffer position, UINT32_MAX on failure.
+ * @returns The new code buffer position.
  * @param   pReNative   The native recompile state.
  * @param   off         The current code buffer position.

@@ -2496 +2543 @@
         fRegs &= ~pReNative->Core.bmHstRegsWithGstShadow;
         unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
-        iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);
-
-        uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
-        Log12(("iemNativeRegMoveOrSpillStackVar: moving idxVar=%d from %s to %s (fGstRegShadows=%RX64)\n",
-               idxVar, g_apszIemNativeHstRegNames[idxRegOld], g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
-        off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegNew, idxRegOld);
-
-        pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
-        pReNative->Core.aHstRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
-        pReNative->Core.aHstRegs[idxRegNew].idxVar         = idxVar;
-        if (fGstRegShadows)
-        {
-            pReNative->Core.bmHstRegsWithGstShadow |= RT_BIT_32(idxRegNew);
-            while (fGstRegShadows)
-            {
-                unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
-                fGstRegShadows &= ~RT_BIT_64(idxGstReg);
-
-                Assert(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxRegOld);
-                pReNative->Core.aidxGstRegShadows[idxGstReg] = idxRegNew;
-            }
-        }
-
-        pReNative->Core.aVars[idxVar].idxReg = (uint8_t)idxRegNew;
-        pReNative->Core.bmHstRegs |= RT_BIT_32(idxRegNew);
-    }
+        return iemNativeRegMoveVar(pReNative, off, idxVar, idxRegOld, idxRegNew, "iemNativeRegMoveOrSpillStackVar");
+    }
+
     /*
      * Otherwise we must spill the register onto the stack.
      */
-    else
-    {
-        uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
-        Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%d/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
-               idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
-        AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
-        off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
-
-        pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
-        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
-        pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
-    }
-
-    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxRegOld);
+    uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
+    Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%d/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
+           idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
+    AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
+    off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
+
+    pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
+    pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
+    pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
+    pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxRegOld);
     pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows = 0;
     return off;

@@ -3586 +3606 @@
 
 # elif defined(RT_ARCH_ARM64)
-    /* mov TMP0, [gstreg] */
-    off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, enmGstReg);
-
     uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+
     /* b.eq +1 */
     pu32CodeBuf[off++] = Armv8A64MkInstrBCond(kArmv8InstrCond_Eq, 2);
-    /* brk #0x1000+enmGstReg */
+    /* brk #0x2000 */
     pu32CodeBuf[off++] = Armv8A64MkInstrBrk(UINT32_C(0x2000));

@@ -5571 +5589 @@
             if (   uArgNo < RT_ELEMENTS(g_aidxIemNativeCallRegs)
                 && !(pReNative->Core.bmHstRegs & RT_BIT_32(g_aidxIemNativeCallRegs[uArgNo])))
+            {
                 idxReg = g_aidxIemNativeCallRegs[uArgNo];
+                Log11(("iemNativeVarAllocRegister: idxVar=%u idxReg=%u (matching arg %u)\n", idxVar, idxReg, uArgNo));
+            }
             else
             {

@@ -5586 +5607 @@
                 Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows == 0);
                 Assert(!(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxReg)));
+                Log11(("iemNativeVarAllocRegister: idxVar=%u idxReg=%u (uArgNo=%u)\n", idxVar, idxReg, uArgNo));
             }
             else
 ...
                                                    IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & fNotArgsMask);
                 AssertStmt(idxReg != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_ALLOCATOR_NO_FREE_VAR));
+                Log11(("iemNativeVarAllocRegister: idxVar=%u idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
             }

@@ -6041 +6064 @@
                 if (uArgNo == i)
                 { /* prefect */ }
+                /* The variable allocator logic should make sure this is impossible,
+                   except for when the return register is used as a parameter (ARM,
+                   but not x86). */
+#if RT_BIT_32(IEMNATIVE_CALL_RET_GREG) & IEMNATIVE_CALL_ARGS_GREG_MASK
+                else if (idxArgReg == IEMNATIVE_CALL_RET_GREG && uArgNo != UINT8_MAX)
+                {
+# ifdef IEMNATIVE_FP_OFF_STACK_ARG0
+#  error "Implement this"
+# endif
+                    Assert(uArgNo < IEMNATIVE_CALL_ARG_GREG_COUNT);
+                    uint8_t const idxFinalArgReg = g_aidxIemNativeCallRegs[uArgNo];
+                    AssertStmt(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxFinalArgReg)),
+                               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));
+                    off = iemNativeRegMoveVar(pReNative, off, idxVar, idxArgReg, idxFinalArgReg, "iemNativeEmitCallCommon");
+                }
+#endif
                 else
                 {
-                    /* The variable allocator logic should make sure this is impossible. */
                     AssertStmt(uArgNo == UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_10));
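The preprocessor condition guarding the new fix-up path is worth spelling out: on the AArch64 calling convention the return register x0 doubles as the first argument register, so IEMNATIVE_CALL_RET_GREG overlaps IEMNATIVE_CALL_ARGS_GREG_MASK, whereas on both x86-64 ABIs rax is not an argument register and the whole branch compiles away. A small self-contained sketch of the same compile-time mask test, with hypothetical MY_* macros standing in for the IEMNATIVE_* ones:

    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT_32(iBit) (UINT32_C(1) << (iBit))

    /* Hypothetical stand-ins: with x0 as both the return register and the
       first argument register, the masks overlap; on x86-64 they would not. */
    #define MY_CALL_RET_GREG        0 /* x0 */
    #define MY_CALL_ARGS_GREG_MASK  (RT_BIT_32(0) | RT_BIT_32(1) | RT_BIT_32(2) | RT_BIT_32(3))

    int main(void)
    {
    #if RT_BIT_32(MY_CALL_RET_GREG) & MY_CALL_ARGS_GREG_MASK
        puts("return register overlaps an argument register: fix-up path compiled in");
    #else
        puts("no overlap: fix-up path compiled out");
    #endif
        return 0;
    }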
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r102368 → r102370)

@@ -195 +195 @@
 #  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
 #  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
+#  define IEMNATIVE_CALL_ARGS_GREG_MASK     (  RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) )
 #  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
                                              | RT_BIT_32(X86_GREG_xCX) \

@@ -210 +215 @@
 #  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
 #  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
+#  define IEMNATIVE_CALL_ARGS_GREG_MASK     (  RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) \
+                                             | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG) )
 #  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
                                              | RT_BIT_32(X86_GREG_xCX) \

@@ -232 +243 @@
 #  define IEMNATIVE_CALL_ARG6_GREG          ARMV8_A64_REG_X6
 #  define IEMNATIVE_CALL_ARG7_GREG          ARMV8_A64_REG_X7
+#  define IEMNATIVE_CALL_ARGS_GREG_MASK     (  RT_BIT_32(ARMV8_A64_REG_X0) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X1) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X2) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X3) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X4) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X5) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X6) \
+                                             | RT_BIT_32(ARMV8_A64_REG_X7) )
 #  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(ARMV8_A64_REG_X0) \
                                              | RT_BIT_32(ARMV8_A64_REG_X1) \
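With a per-ABI argument-register mask in place, call-related code can test membership with a single AND instead of comparing against each IEMNATIVE_CALL_ARGn_GREG in turn; the new #if in IEMAllN8veRecompiler.cpp does the same test at compile time. A hypothetical run-time illustration of the idiom:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT_32(iBit) (UINT32_C(1) << (iBit))

    /* Hypothetical 4-register argument mask (rcx, rdx, r8, r9 would be
       registers 1, 2, 8, 9 in x86 GPR numbering). */
    static uint32_t const g_fArgRegs = RT_BIT_32(1) | RT_BIT_32(2) | RT_BIT_32(8) | RT_BIT_32(9);

    static bool IsArgReg(uint8_t iReg)
    {
        return (g_fArgRegs & RT_BIT_32(iReg)) != 0;
    }

    int main(void)
    {
        printf("reg 8 is %san argument register\n", IsArgReg(8) ? "" : "not ");
        printf("reg 0 is %san argument register\n", IsArgReg(0) ? "" : "not ");
        return 0;
    }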
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h (r102368 → r102370)

@@ -778,6 +778,6 @@
     else
     {
-        off = iemNativeEmitLoadGprImm64(pReNative, off, iGrpDst, (int64)iAddend);
+        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, (int64_t)iAddend);
         uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
         pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprSrc, iGprDst);
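The corrected line sits in the fallback path for addends too large for an A64 ADD immediate: the addend is first materialized in the destination register, then combined with a register-register ADD computing dst = src + dst, so no extra temporary is needed because dst is about to be overwritten anyway. A hypothetical sketch of the same strategy that prints A64-style assembly instead of encoding it (the 12-bit check is simplified and ignores the shifted LSL #12 form):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical emitter: small addends use the 12-bit ADD immediate,
       large ones are loaded into the destination first (the real encoder
       would expand the mov into movz/movk as needed). */
    static void EmitAddGprImm(unsigned iGprDst, unsigned iGprSrc, int64_t iAddend)
    {
        if (iAddend >= 0 && iAddend < 0x1000) /* fits the unshifted 12-bit immediate */
            printf("add  x%u, x%u, #%lld\n", iGprDst, iGprSrc, (long long)iAddend);
        else
        {
            printf("mov  x%u, #%lld\n", iGprDst, (long long)iAddend);
            printf("add  x%u, x%u, x%u\n", iGprDst, iGprSrc, iGprDst); /* dst = src + dst */
        }
    }

    int main(void)
    {
        EmitAddGprImm(0, 1, 42);                   /* small addend: single add */
        EmitAddGprImm(0, 1, INT64_C(0x123456789)); /* large addend: mov + add */
        return 0;
    }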