Changeset 106315 in vbox for trunk/src/VBox/VMM
Timestamp:
    Oct 15, 2024 1:05:43 AM
Location:
    trunk/src/VBox/VMM
Files:
    4 edited
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp
r106187 → r106315

@@ 210 @@
     {
         uint8_t const idxEflReg  = !a_fCheckIrqs ? UINT8_MAX
-                                 : iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly,
-                                                                      RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
+                                 : iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off,
+                                                                              RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
         uint8_t const idxTmpReg1 = iemNativeRegAllocTmp(pReNative, &off);
         uint8_t const idxTmpReg2 = a_fCheckIrqs ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r106196 → r106315

@@ 440 @@
     off = iemNativeRegFlushPendingWrites(pReNative, off);

-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
-                                                                 RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER),
-                                                                 RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsForUpdate(pReNative, &off,
+                                                                          RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER),
+                                                                          RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
     off = iemNativeEmitTbExitIfAnyBitsSetInGpr<kIemNativeLabelType_ReturnWithFlags>(pReNative, off, idxEflReg,
                                                                                     X86_EFL_TF

@@ 3427 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBits);

     /* Test and jump. */

@@ 3455 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBits);

     /* Test and jump. */

@@ 3484 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBit);

     /* Test and jump. */

@@ 3513 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBit);

     /* Test and jump. */

@@ 3553 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBits);

 #ifdef RT_ARCH_AMD64

@@ 3635 @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBits);

 #ifdef RT_ARCH_AMD64

@@ 3812 @@
        register allocator state.
        Doing EFLAGS first as it's more likely to be loaded, right? */
-    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);
+    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEflBit);
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                  kIemNativeGstRegUse_ReadOnly);

@@ 3881 @@
        register allocator state.
        Doing EFLAGS first as it's more likely to be loaded, right? */
-    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEFlBit);
+    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlagsReadOnly(pReNative, &off, fLivenessEFlBit);
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                  kIemNativeGstRegUse_ReadOnly);

@@ 5962 @@
      * zero, but since iemNativeVarRegisterSet clears the shadowing,
      * that's counter productive... */
-    uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
-                                                                 a_fLivenessEflInput, a_fLivenessEflOutput);
+    uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestEFlagsForUpdate(pReNative, &off,
+                                                                          a_fLivenessEflInput, a_fLivenessEflOutput);
     iemNativeVarRegisterSet(pReNative, idxVarEFlags, idxGstReg, off, true /*fAllocated*/);
 }

@@ 6103 @@
 DECL_INLINE_THROW(uint32_t) iemNativeEmitModifyEFlagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
-                                                                 a_enmOp == kIemNativeEmitEflOp_Flip ? a_fLivenessEflBit : 0,
-                                                                 a_fLivenessEflBit);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlagsForUpdate(pReNative, &off,
+                                                                          a_enmOp == kIemNativeEmitEflOp_Flip
+                                                                          ? a_fLivenessEflBit : 0,
+                                                                          a_fLivenessEflBit);

     /* Using 'if constexpr' forces code elimination in debug builds with VC. */

@@ 9798 @@
     uint8_t const idxRegTmp   = iemNativeRegAllocTmp(pReNative, &off, false /*fPreferVolatile*/);
-    uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr, kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr,
+                                                                kIemNativeGstRegUse_ReadOnly);

     /*

@@ 9851 @@
     /** @todo Check the host supported flags (needs additional work to get the host features from CPUM)
      *        and implement alternate handling if FEAT_AFP is present. */
-    uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr, kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxRegMxCsr = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_MxCsr,
+                                                                kIemNativeGstRegUse_ReadOnly);

     PIEMNATIVEINSTR pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
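The substitution above repeats mechanically across both files: the kIemNativeGstRegUse_* selector argument disappears and the intended use moves into the function name, so no allocation site carries a runtime selector anymore. A minimal, hypothetical reduction of the shape change (stub types and bodies, not the real recompiler API):

// Before/after shape of the EFLAGS allocator call sites changed in this
// changeset. ReNativeState, allocEFlags and allocEFlagsReadOnly are
// illustrative stand-ins only.
#include <cstdint>

struct ReNativeState { };                                     // stand-in for IEMRECOMPILERSTATE
enum GstRegUse { kGstRegUse_ReadOnly, kGstRegUse_ForUpdate };

// Old shape: one entry point, the intended use selected by a runtime argument.
static uint8_t allocEFlags(ReNativeState *, uint32_t *, GstRegUse, uint64_t /*fRead*/)
{ return 0; }

// New shape: one entry point per intended use, nothing left to branch on.
static uint8_t allocEFlagsReadOnly(ReNativeState *, uint32_t *, uint64_t /*fRead*/)
{ return 0; }

int main()
{
    ReNativeState s;
    uint32_t off = 0;
    allocEFlags(&s, &off, kGstRegUse_ReadOnly, UINT64_C(1) << 5);   // before r106315
    allocEFlagsReadOnly(&s, &off, UINT64_C(1) << 5);                // after r106315
    return 0;
}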
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r106314 → r106315

@@ 3594 @@
  * iemNativeRegAllocTmpForGuestEFlags().
  *
- * See iemNativeRegAllocTmpForGuestReg() for details.
- */
-static uint8_t
-iemNativeRegAllocTmpForGuestRegCommon(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
-                                      IEMNATIVEGSTREGUSE enmIntendedUse, bool fNoVolatileRegs)
+ * See iemNativeRegAllocTmpForGuestRegInt() for details.
+ */
+template<IEMNATIVEGSTREGUSE const a_enmIntendedUse, uint32_t const a_fRegMask>
+static uint8_t iemNativeRegAllocTmpForGuestRegCommon(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
 {
     Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);

@@ 3603 @@
     static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
 #endif
-    uint32_t const fRegMask = !fNoVolatileRegs
-                            ? IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK
-                            : IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;

     /*

@@ 3625 @@
         /* If the purpose is calculations, try duplicate the register value as
            we'll be clobbering the shadow. */
-        if (   enmIntendedUse == kIemNativeGstRegUse_Calculation
+        if (   a_enmIntendedUse == kIemNativeGstRegUse_Calculation
             && (  ~pReNative->Core.bmHstRegs
                 & ~pReNative->Core.bmHstRegsWithGstShadow
                 & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)))
         {
-            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask);
+            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, a_fRegMask);

             *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

@@ 3641 @@
         /* If the current register matches the restrictions, go ahead and allocate
            it for the caller. */
-        else if (fRegMask & RT_BIT_32(idxReg))
+        else if (a_fRegMask & RT_BIT_32(idxReg))
         {
             pReNative->Core.bmHstRegs |= RT_BIT_32(idxReg);
             pReNative->Core.aHstRegs[idxReg].enmWhat = kIemNativeWhat_Tmp;
             pReNative->Core.aHstRegs[idxReg].idxVar  = UINT8_MAX;
-            if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
-                Log12(("iemNativeRegAllocTmpForGuestReg: Reusing %s for guest %s %s\n",
-                       g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
+            if RT_CONSTEXPR_IF(a_enmIntendedUse != kIemNativeGstRegUse_Calculation)
+                Log12(("iemNativeRegAllocTmpForGuestReg: Reusing %s for guest %s %s\n", g_apszIemNativeHstRegNames[idxReg],
+                       g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[a_enmIntendedUse]));
             else
             {

@@ 3661 @@
         else
         {
-            Assert(fNoVolatileRegs);
-            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask & ~RT_BIT_32(idxReg),
-                                                             !fNoVolatileRegs
-                                                             && enmIntendedUse == kIemNativeGstRegUse_Calculation);
+            Assert(!(a_fRegMask & IEMNATIVE_CALL_VOLATILE_GREG_MASK));
+            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, a_fRegMask & ~RT_BIT_32(idxReg),
+                                                                (a_fRegMask & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
+                                                             && a_enmIntendedUse == kIemNativeGstRegUse_Calculation);
             *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
-            if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
+            if RT_CONSTEXPR_IF(a_enmIntendedUse != kIemNativeGstRegUse_Calculation)
             {
                 iemNativeRegTransferGstRegShadowing(pReNative, idxReg, idxRegNew, enmGstReg, *poff);
                 Log12(("iemNativeRegAllocTmpForGuestReg: Transfering %s to %s for guest %s %s\n",
                        g_apszIemNativeHstRegNames[idxReg], g_apszIemNativeHstRegNames[idxRegNew],
-                       g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
+                       g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[a_enmIntendedUse]));
             }
             else

@@ 3688 @@
          * guest shadow copy assignment to the new register.
          */
-        AssertMsg(   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
-                  && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
-                  ("This shouldn't happen: idxReg=%d enmGstReg=%d enmIntendedUse=%s\n",
-                   idxReg, enmGstReg, s_pszIntendedUse[enmIntendedUse]));
+        AssertMsg(   a_enmIntendedUse != kIemNativeGstRegUse_ForUpdate
+                  && a_enmIntendedUse != kIemNativeGstRegUse_ForFullWrite,
+                  ("This shouldn't happen: idxReg=%d enmGstReg=%d a_enmIntendedUse=%s\n",
+                   idxReg, enmGstReg, s_pszIntendedUse[a_enmIntendedUse]));

         /** @todo share register for readonly access. */
-        uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask,
-                                                         enmIntendedUse == kIemNativeGstRegUse_Calculation);
-
-        if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
+        uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, a_fRegMask,
+                                                         a_enmIntendedUse == kIemNativeGstRegUse_Calculation);
+
+        if RT_CONSTEXPR_IF(a_enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
             *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);

-        if (   enmIntendedUse != kIemNativeGstRegUse_ForUpdate
-            && enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
+        if RT_CONSTEXPR_IF(   a_enmIntendedUse != kIemNativeGstRegUse_ForUpdate
+                           && a_enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
             Log12(("iemNativeRegAllocTmpForGuestReg: Duplicated %s for guest %s into %s for %s\n",
                    g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
-                   g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
+                   g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[a_enmIntendedUse]));
         else
         {

@@ 3710 @@
             Log12(("iemNativeRegAllocTmpForGuestReg: Moved %s for guest %s into %s for %s\n",
                    g_apszIemNativeHstRegNames[idxReg], g_aGstShadowInfo[enmGstReg].pszName,
-                   g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[enmIntendedUse]));
+                   g_apszIemNativeHstRegNames[idxRegNew], s_pszIntendedUse[a_enmIntendedUse]));
         }
         idxReg = idxRegNew;
     }
-    Assert(RT_BIT_32(idxReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */
+    Assert(RT_BIT_32(idxReg) & a_fRegMask); /* See assumption in fNoVolatileRegs docs. */

 #ifdef VBOX_STRICT

@@ 3723 @@
 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
         /** @todo r=aeichner Implement for registers other than GPR as well. */
-        if (   (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
-                || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
-            && (   (   enmGstReg >= kIemNativeGstReg_GprFirst
-                    && enmGstReg <= kIemNativeGstReg_GprLast)
-                || enmGstReg == kIemNativeGstReg_MxCsr))
-        {
+        if RT_CONSTEXPR_IF(   a_enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
+                           || a_enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
+            if (   (   enmGstReg >= kIemNativeGstReg_GprFirst
+                    && enmGstReg <= kIemNativeGstReg_GprLast)
+                || enmGstReg == kIemNativeGstReg_MxCsr)
+            {
 # ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
-            iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
-            iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxReg);
+                iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
+                iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxReg);
 # endif
-            pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
-        }
+                pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
+            }
 #endif

         return idxReg;
     }

     /*
      * Allocate a new register, load it with the guest value and designate it as a copy of the
      */
-    uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);
-
-    if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
+    uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, a_fRegMask,
+                                                     a_enmIntendedUse == kIemNativeGstRegUse_Calculation);
+
+    if RT_CONSTEXPR_IF(a_enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
         *poff = iemNativeEmitLoadGprWithGstShadowReg(pReNative, *poff, idxRegNew, enmGstReg);

-    if (enmIntendedUse != kIemNativeGstRegUse_Calculation)
+    if RT_CONSTEXPR_IF(a_enmIntendedUse != kIemNativeGstRegUse_Calculation)
         iemNativeRegMarkAsGstRegShadow(pReNative, idxRegNew, enmGstReg, *poff);
     Log12(("iemNativeRegAllocTmpForGuestReg: Allocated %s for guest %s %s\n",
-           g_apszIemNativeHstRegNames[idxRegNew], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[enmIntendedUse]));
+           g_apszIemNativeHstRegNames[idxRegNew], g_aGstShadowInfo[enmGstReg].pszName, s_pszIntendedUse[a_enmIntendedUse]));

 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     /** @todo r=aeichner Implement for registers other than GPR as well. */
-    if (   (   enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
-            || enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
-        && (   (   enmGstReg >= kIemNativeGstReg_GprFirst
-                && enmGstReg <= kIemNativeGstReg_GprLast)
-            || enmGstReg == kIemNativeGstReg_MxCsr))
-    {
+    if RT_CONSTEXPR_IF(   a_enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
+                       || a_enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
+        if (   (   enmGstReg >= kIemNativeGstReg_GprFirst
+                && enmGstReg <= kIemNativeGstReg_GprLast)
+            || enmGstReg == kIemNativeGstReg_MxCsr)
+        {
 # ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
-        iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
-        iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxRegNew);
+            iemNativeDbgInfoAddNativeOffset(pReNative, *poff);
+            iemNativeDbgInfoAddGuestRegDirty(pReNative, false /*fSimdReg*/, enmGstReg, idxRegNew);
 # endif
-        pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
-    }
+            pReNative->Core.bmGstRegShadowDirty |= RT_BIT_64(enmGstReg);
+        }
 #endif

@@ 3790 @@
  *                      the request.
  * @param   enmGstReg   The guest register that will is to be updated.
- * @param   enmIntendedUse  How the caller will be using the host register.
- * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
+ * @param   a_enmIntendedUse    How the caller will be using the host register.
+ * @param   a_fNonVolatileRegs  Set if no volatile register allowed, clear if any
  *                      register is okay (default).  The ASSUMPTION here is
  *                      that the caller has already flushed all volatile
  *                      registers, so this is only applied if we allocate a
  *                      new register.
- * @param   fSkipLivenessAssert  Hack for liveness input validation of EFLAGS.
  * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
  */
-DECL_HIDDEN_THROW(uint8_t)
-iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
-                                IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
-                                bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
+template<IEMNATIVEGSTREGUSE const a_enmIntendedUse, bool const a_fNonVolatileRegs>
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegInt(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
 {
 #ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
-    AssertMsg(   fSkipLivenessAssert
-              || pReNative->idxCurCall == 0
+    AssertMsg(   pReNative->idxCurCall == 0
               || enmGstReg == kIemNativeGstReg_Pc
-              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
+              || (a_enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
                   ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
-                  : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
+                  : a_enmIntendedUse == kIemNativeGstRegUse_ForUpdate
                   ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
                   : IEMLIVENESS_STATE_IS_INPUT_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
               ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
 #endif
-    RT_NOREF(fSkipLivenessAssert);
-
-    return iemNativeRegAllocTmpForGuestRegCommon(pReNative, poff, enmGstReg, enmIntendedUse, fNoVolatileRegs);
-}
+
+    if RT_CONSTEXPR_IF(!a_fNonVolatileRegs)
+        return iemNativeRegAllocTmpForGuestRegCommon<a_enmIntendedUse,
+                                                       IEMNATIVE_HST_GREG_MASK
+                                                     & ~IEMNATIVE_REG_FIXED_MASK>(pReNative, poff, enmGstReg);
+    else /* keep else, is required by MSC */
+        return iemNativeRegAllocTmpForGuestRegCommon<a_enmIntendedUse,
+                                                       IEMNATIVE_HST_GREG_MASK
+                                                     & ~IEMNATIVE_REG_FIXED_MASK
+                                                     & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK>(pReNative, poff, enmGstReg);
+}
+
+/* Variants including volatile registers: */
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegReadOnly(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ReadOnly, false>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ForUpdate, false>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegFullWrite(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ForFullWrite, false>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegCalculation(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_Calculation, false>(pReNative, poff, enmGstReg);
+}
+
+/* Variants excluding any volatile registers: */
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegReadOnlyNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ReadOnly, true>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegUpdateNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ForUpdate, true>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegFullWriteNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_ForFullWrite, true>(pReNative, poff, enmGstReg);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegCalculationNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeRegAllocTmpForGuestRegInt<kIemNativeGstRegUse_Calculation, true>(pReNative, poff, enmGstReg);
+}
+

@@ 3886 @@
  * kIemNativeGstReg_EFlags as argument.
  */
-DECL_HIDDEN_THROW(uint8_t)
-iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREGUSE enmIntendedUse,
-                                   uint64_t fRead, uint64_t fWrite /*= 0*/, uint64_t fPotentialCall /*= 0*/)
+template<IEMNATIVEGSTREGUSE const a_enmIntendedUse>
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t fRead,
+                                   uint64_t fWrite /*= 0*/, uint64_t fPotentialCall /*= 0*/)
 {
     if (pReNative->idxCurCall != 0 && (fRead || fWrite /*|| fPotentialCall*/))

@@ 3918 @@
     }
     RT_NOREF(fPotentialCall);
-    return iemNativeRegAllocTmpForGuestRegCommon(pReNative, poff, kIemNativeGstReg_EFlags,
-                                                 enmIntendedUse, false /*fNoVolatileRegs*/);
-}
+
+    AssertCompile(a_enmIntendedUse == kIemNativeGstRegUse_ReadOnly || a_enmIntendedUse == kIemNativeGstRegUse_ForUpdate);
+    if RT_CONSTEXPR_IF(a_enmIntendedUse == kIemNativeGstRegUse_ReadOnly)
+        return iemNativeRegAllocTmpForGuestRegCommon<kIemNativeGstRegUse_ReadOnly,
+                                                       IEMNATIVE_CALL_VOLATILE_GREG_MASK
+                                                     & IEMNATIVE_HST_GREG_MASK
+                                                     & ~IEMNATIVE_REG_FIXED_MASK>(pReNative, poff, kIemNativeGstReg_EFlags);
+    else /* keep else, is required by MSC */
+        return iemNativeRegAllocTmpForGuestRegCommon<kIemNativeGstRegUse_ForUpdate,
+                                                       IEMNATIVE_CALL_VOLATILE_GREG_MASK
+                                                     & IEMNATIVE_HST_GREG_MASK
+                                                     & ~IEMNATIVE_REG_FIXED_MASK>(pReNative, poff, kIemNativeGstReg_EFlags);
+}
+
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlagsReadOnly(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                           uint64_t fRead, uint64_t fWrite /*= 0*/, uint64_t fPotentialCall /*= 0*/)
+{
+    return iemNativeRegAllocTmpForGuestEFlags<kIemNativeGstRegUse_ReadOnly>(pReNative, poff, fRead, fWrite, fPotentialCall);
+}
+
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlagsForUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t fRead,
+                                            uint64_t fWrite /*= 0*/, uint64_t fPotentialCall /*= 0*/)
+{
+    return iemNativeRegAllocTmpForGuestEFlags<kIemNativeGstRegUse_ForUpdate>(pReNative, poff, fRead, fWrite, fPotentialCall);
+}
+
 #endif
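The core of this file's change is the rewrite of iemNativeRegAllocTmpForGuestRegCommon(): the former runtime parameters enmIntendedUse and fNoVolatileRegs become template parameters, so every branch on them is resolved at compile time and the permitted register mask folds to a constant. A minimal sketch of the pattern follows, with stand-in names and masks rather than the real VMM definitions; plain C++17 'if constexpr' stands in here for the tree's RT_CONSTEXPR_IF.

// Illustrative reduction of the template-dispatch refactoring; all identifiers
// and mask values below are hypothetical stand-ins.
#include <cstdint>
#include <cstdio>

enum class Use { ReadOnly, ForUpdate, ForFullWrite, Calculation };

constexpr uint32_t kAllRegs      = 0xffffu;   // stand-in for IEMNATIVE_HST_GREG_MASK
constexpr uint32_t kFixedRegs    = 0x0011u;   // stand-in for IEMNATIVE_REG_FIXED_MASK
constexpr uint32_t kVolatileRegs = 0x00ffu;   // stand-in for IEMNATIVE_CALL_VOLATILE_GREG_MASK

// Worker: both the intended use and the register mask are template parameters,
// so the dead branch is discarded at compile time, even in unoptimized builds.
template<Use a_enmUse, uint32_t a_fRegMask>
static uint8_t allocCommon(uint8_t idxReg)
{
    if constexpr (a_enmUse != Use::Calculation)
        std::printf("keep shadow copy in reg %u (mask %#x)\n", unsigned(idxReg), unsigned(a_fRegMask));
    else
        std::printf("shadow will be clobbered, duplicate reg %u first\n", unsigned(idxReg));
    return idxReg;
}

// Dispatcher: maps the old fNoVolatileRegs flag onto a compile-time mask.
template<Use a_enmUse, bool a_fNonVolatileRegs>
static uint8_t allocInt(uint8_t idxReg)
{
    if constexpr (!a_fNonVolatileRegs)
        return allocCommon<a_enmUse, kAllRegs & ~kFixedRegs>(idxReg);
    else // kept as if/else, mirroring the MSC workaround noted in the diff
        return allocCommon<a_enmUse, kAllRegs & ~kFixedRegs & ~kVolatileRegs>(idxReg);
}

// Thin named entry points replace the old enum/bool arguments (two of eight shown).
uint8_t allocReadOnly(uint8_t idxReg)         { return allocInt<Use::ReadOnly,  false>(idxReg); }
uint8_t allocUpdateNoVolatile(uint8_t idxReg) { return allocInt<Use::ForUpdate, true >(idxReg); }

int main()
{
    allocReadOnly(3);
    allocUpdateNoVolatile(7);
    return 0;
}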
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
r106203 → r106315

@@ 2230 @@
 DECL_HIDDEN_THROW(uint8_t)    iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
                                                       bool fPreferVolatile = true);
-DECL_HIDDEN_THROW(uint8_t)    iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                              IEMNATIVEGSTREG enmGstReg,
-                                                              IEMNATIVEGSTREGUSE enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
-                                                              bool fNoVolatileRegs = false, bool fSkipLivenessAssert = false);
+
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegReadOnly(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegFullWrite(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegCalculation(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegReadOnlyNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegUpdateNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegFullWriteNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegCalculationNoVolatile(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg);
+
 #if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && defined(VBOX_STRICT)
-DECL_HIDDEN_THROW(uint8_t)    iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                                 IEMNATIVEGSTREGUSE enmIntendedUse, uint64_t fRead,
-                                                                 uint64_t fWrite = 0, uint64_t fPotentialCall = 0);
-#else
-DECL_FORCE_INLINE_THROW(uint8_t)
-iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREGUSE enmIntendedUse,
-                                   uint64_t fRead, uint64_t fWrite = 0, uint64_t fPotentialCall = 0)
-{
-    RT_NOREF(fRead, fWrite, fPotentialCall);
-    return iemNativeRegAllocTmpForGuestReg(pReNative, poff, kIemNativeGstReg_EFlags, enmIntendedUse);
-}
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestEFlagsReadOnly(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                                      uint64_t fRead, uint64_t fWrite = 0, uint64_t fPotentialCall = 0);
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestEFlagsForUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                                       uint64_t fRead, uint64_t fWrite = 0, uint64_t fPotentialCall = 0);
 #endif

@@ 2844 @@
 *********************************************************************************************************************************/

+#ifdef RT_ARCH_ARM64
+# include <iprt/armv8.h>
+#endif
+
+
 /**
  * Marks host register @a idxHstReg as containing a shadow copy of guest

@@ 3072 @@

+/**
+ * Allocates a temporary host general purpose register for keeping a guest
+ * register value.
+ *
+ * Since we may already have a register holding the guest register value,
+ * code will be emitted to do the loading if that's not the case. Code may also
+ * be emitted if we have to free up a register to satify the request.
+ *
+ * @returns The host register number; throws VBox status code on failure, so no
+ *          need to check the return value.
+ * @param   pReNative       The native recompile state.
+ * @param   poff            Pointer to the variable with the code buffer
+ *                          position. This will be update if we need to move a
+ *                          variable from register to stack in order to satisfy
+ *                          the request.
+ * @param   enmGstReg       The guest register that will is to be updated.
+ * @param   enmIntendedUse  How the caller will be using the host register.
+ * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
+ *                          register is okay (default).  The ASSUMPTION here is
+ *                          that the caller has already flushed all volatile
+ *                          registers, so this is only applied if we allocate a
+ *                          new register.
+ * @sa      iemNativeRegAllocTmpForGuestEFlags
+ *          iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
+ *          iemNativeRegAllocTmpForGuestRegInt
+ */
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
+                                IEMNATIVEGSTREGUSE const enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
+                                bool const fNoVolatileRegs = false)
+{
+    if (enmIntendedUse == kIemNativeGstRegUse_ReadOnly)
+        return !fNoVolatileRegs
+             ? iemNativeRegAllocTmpForGuestRegReadOnly(pReNative, poff, enmGstReg)
+             : iemNativeRegAllocTmpForGuestRegReadOnlyNoVolatile(pReNative, poff, enmGstReg);
+    if (enmIntendedUse == kIemNativeGstRegUse_ForUpdate)
+        return !fNoVolatileRegs
+             ? iemNativeRegAllocTmpForGuestRegUpdate(pReNative, poff, enmGstReg)
+             : iemNativeRegAllocTmpForGuestRegUpdateNoVolatile(pReNative, poff, enmGstReg);
+    if (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite)
+        return !fNoVolatileRegs
+             ? iemNativeRegAllocTmpForGuestRegFullWrite(pReNative, poff, enmGstReg)
+             : iemNativeRegAllocTmpForGuestRegFullWriteNoVolatile(pReNative, poff, enmGstReg);
+    Assert(enmIntendedUse == kIemNativeGstRegUse_Calculation);
+    return !fNoVolatileRegs
+         ? iemNativeRegAllocTmpForGuestRegCalculation(pReNative, poff, enmGstReg)
+         : iemNativeRegAllocTmpForGuestRegCalculationNoVolatile(pReNative, poff, enmGstReg);
+}
+
+#if !defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) || !defined(VBOX_STRICT)
+
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlagsReadOnly(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t fRead,
+                                           uint64_t fWrite = 0, uint64_t fPotentialCall = 0)
+{
+    RT_NOREF(fRead, fWrite, fPotentialCall);
+    return iemNativeRegAllocTmpForGuestRegReadOnly(pReNative, poff, kIemNativeGstReg_EFlags);
+}
+
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlagsForUpdate(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t fRead,
+                                            uint64_t fWrite = 0, uint64_t fPotentialCall = 0)
+{
+    RT_NOREF(fRead, fWrite, fPotentialCall);
+    return iemNativeRegAllocTmpForGuestRegUpdate(pReNative, poff, kIemNativeGstReg_EFlags);
+}
+
+#endif
+

 /*********************************************************************************************************************************
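The header keeps the old iemNativeRegAllocTmpForGuestReg() signature as a forced-inline dispatcher onto the eight new entry points, so existing call sites compile unchanged and, when the arguments are compile-time constants, collapse to a single direct call. An illustrative reduction of that shim, with stand-in names and stubbed variants rather than the real VMM API:

// Hypothetical sketch of the compatibility dispatcher pattern; two of the
// four intended uses are shown.
#include <cassert>
#include <cstdint>

enum class Use { ReadOnly, ForUpdate, ForFullWrite, Calculation };

// Stubs standing in for the out-of-line variants declared in the header diff.
uint8_t allocReadOnly(int idxGstReg)           { return uint8_t(idxGstReg); }
uint8_t allocReadOnlyNoVolatile(int idxGstReg) { return uint8_t(idxGstReg); }
uint8_t allocUpdate(int idxGstReg)             { return uint8_t(idxGstReg); }
uint8_t allocUpdateNoVolatile(int idxGstReg)   { return uint8_t(idxGstReg); }

// Old-style entry point, now only a dispatcher (DECL_FORCE_INLINE_THROW in the
// tree). With constant arguments the if-chain folds away after inlining.
inline uint8_t allocForGuestReg(int idxGstReg, Use enmUse = Use::ReadOnly, bool fNoVolatile = false)
{
    if (enmUse == Use::ReadOnly)
        return !fNoVolatile ? allocReadOnly(idxGstReg) : allocReadOnlyNoVolatile(idxGstReg);
    assert(enmUse == Use::ForUpdate);  // this sketch covers two of the four uses
    return !fNoVolatile ? allocUpdate(idxGstReg) : allocUpdateNoVolatile(idxGstReg);
}

int main()
{
    return allocForGuestReg(3, Use::ForUpdate);  // inlines to allocUpdate(3)
}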