Changeset 106113 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 21, 2024 12:01:43 AM (4 months ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
Legend: in the unified diffs below, lines prefixed with '-' were removed in r106113, lines prefixed with '+' were added, and unprefixed lines are unmodified context. Elided context is marked with "@@ … @@".
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp
(diff against r106090)

@@ … @@
     {
         uint8_t const idxEflReg  = !a_fCheckIrqs ? UINT8_MAX
-                                 : iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                                   kIemNativeGstRegUse_ReadOnly);
+                                 : iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly,
+                                                                      RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
         uint8_t const idxTmpReg1 = iemNativeRegAllocTmp(pReNative, &off);
         uint8_t const idxTmpReg2 = a_fCheckIrqs ? iemNativeRegAllocTmp(pReNative, &off) : UINT8_MAX;
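The mask passed as the new fRead argument is worth decoding: per the iemNativeEflagsToLivenessMask() helper introduced in IEMAllN8veRecompFuncs.h below, every EFLAGS bit outside X86_EFL_STATUS_BITS maps onto the single IEMLIVENESSBIT_IDX_EFL_OTHER liveness bit. A sketch of the intent (editorial commentary, not code from the tree; the X86_EFL_IF instantiation is a hypothetical example):

/* Any non-status flag folds onto the EFL_OTHER liveness bit, e.g.:
 *     iemNativeEflagsToLivenessMask<X86_EFL_IF>()
 *         == RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER)
 * So this call site declares that it only reads EFLAGS bits outside
 * X86_EFL_STATUS_BITS; the liveness validator need not treat the
 * arithmetic status flags as inputs here. */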
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
(diff against r106097)

@@ … @@
     off = iemNativeRegFlushPendingWrites(pReNative, off);

-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ForUpdate, false /*fNoVolatileRegs*/,
-                                                              true /*fSkipLivenessAssert*/);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
+                                                                 RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER),
+                                                                 RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER));
     off = iemNativeEmitTestAnyBitsInGprAndTbExitIfAnySet(pReNative, off, idxEflReg,
                                                          X86_EFL_TF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK,

@@ … @@
        PUSH FS in real mode, so we have to try emulate that here.
        We borrow the now unused idxReg1 from the TLB lookup code here. */
-    uint8_t idxRegEfl = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off,
-                                                                        kIemNativeGstReg_EFlags);
+    uint8_t const idxRegEfl = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off,
+                                                                              kIemNativeGstReg_EFlags);
     if (idxRegEfl != UINT8_MAX)
     {

@@ … @@
+/**
+ * Helper function to convert X86_EFL_xxx masks to liveness masks.
+ *
+ * The compiler should be able to figure this out at compile time, so sprinkling
+ * constexpr where ever possible here to nudge it along.
+ */
+template<uint32_t const a_fEfl>
+RT_CONSTEXPR uint64_t iemNativeEflagsToLivenessMask(void)
+{
+    return (a_fEfl & ~X86_EFL_STATUS_BITS ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OTHER) : 0)
+         | (a_fEfl & X86_EFL_CF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_CF)    : 0)
+         | (a_fEfl & X86_EFL_PF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_PF)    : 0)
+         | (a_fEfl & X86_EFL_AF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_AF)    : 0)
+         | (a_fEfl & X86_EFL_ZF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_ZF)    : 0)
+         | (a_fEfl & X86_EFL_SF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_SF)    : 0)
+         | (a_fEfl & X86_EFL_OF           ? RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_OF)    : 0);
+}
+
+
+/**
+ * Helper function to convert a single X86_EFL_xxxx value to bit number.
+ *
+ * The compiler should be able to figure this out at compile time, so sprinkling
+ * constexpr where ever possible here to nudge it along.
+ */
+template<uint32_t const a_fEfl>
+RT_CONSTEXPR unsigned iemNativeEflagsToSingleBitNo(void)
+{
+    AssertCompile(   a_fEfl == X86_EFL_CF
+                  || a_fEfl == X86_EFL_PF
+                  || a_fEfl == X86_EFL_AF
+                  || a_fEfl == X86_EFL_ZF
+                  || a_fEfl == X86_EFL_SF
+                  || a_fEfl == X86_EFL_OF
+                  || a_fEfl == X86_EFL_DF);
+    return a_fEfl == X86_EFL_CF ? X86_EFL_CF_BIT
+         : a_fEfl == X86_EFL_PF ? X86_EFL_PF_BIT
+         : a_fEfl == X86_EFL_AF ? X86_EFL_AF_BIT
+         : a_fEfl == X86_EFL_ZF ? X86_EFL_ZF_BIT
+         : a_fEfl == X86_EFL_SF ? X86_EFL_SF_BIT
+         : a_fEfl == X86_EFL_OF ? X86_EFL_OF_BIT
+         : X86_EFL_DF_BIT;
+}
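Why these helpers fold to constants: every comparison involves only the template parameter, so the whole expression is a compile-time constant in any instantiation. A minimal standalone sketch of the same pattern (the flag values and liveness indices below are simplified stand-ins, not the real X86_EFL_*/IEMLIVENESSBIT_IDX_EFL_* values):

#include <cstdint>

constexpr uint32_t MY_EFL_CF = 1u << 0;    // stand-in for X86_EFL_CF
constexpr uint32_t MY_EFL_ZF = 1u << 6;    // stand-in for X86_EFL_ZF
constexpr unsigned MY_LIVENESS_IDX_CF = 0; // stand-in for IEMLIVENESSBIT_IDX_EFL_CF
constexpr unsigned MY_LIVENESS_IDX_ZF = 3; // stand-in for IEMLIVENESSBIT_IDX_EFL_ZF

template<uint32_t a_fEfl>
constexpr uint64_t eflagsToLivenessMask()
{
    // Same shape as the VBox helper: per-flag ternaries OR'ed together.
    return (a_fEfl & MY_EFL_CF ? UINT64_C(1) << MY_LIVENESS_IDX_CF : 0)
         | (a_fEfl & MY_EFL_ZF ? UINT64_C(1) << MY_LIVENESS_IDX_ZF : 0);
}

// The whole computation is done by the compiler, not at recompile time:
static_assert(eflagsToLivenessMask<MY_EFL_CF | MY_EFL_ZF>()
                  == ((UINT64_C(1) << MY_LIVENESS_IDX_CF) | (UINT64_C(1) << MY_LIVENESS_IDX_ZF)),
              "mask conversion is a compile-time constant");

int main() { return 0; }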
@@ … @@
 #define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) \
-    off = iemNativeEmitIfEflagAnysBitsSet(pReNative, off, (a_fBits)); \
+    off = iemNativeEmitIfEflagAnysBitsSet(pReNative, off, (a_fBits), iemNativeEflagsToLivenessMask<a_fBits>()); \
     do {

 /** Emits code for IEM_MC_IF_EFL_ANY_BITS_SET. */
-DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagAnysBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitIfEflagAnysBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl, uint64_t fLivenessEflBits)
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
@@ … @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);

     /* Test and jump. */

@@ … @@
 #define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) \
-    off = iemNativeEmitIfEflagNoBitsSet(pReNative, off, (a_fBits)); \
+    off = iemNativeEmitIfEflagNoBitsSet(pReNative, off, (a_fBits), iemNativeEflagsToLivenessMask<a_fBits>()); \
     do {

 /** Emits code for IEM_MC_IF_EFL_NO_BITS_SET. */
-DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagNoBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl)
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitIfEflagNoBitsSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitsInEfl, uint64_t fLivenessEflBits)
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
@@ … @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);

     /* Test and jump. */

@@ … @@
 #define IEM_MC_IF_EFL_BIT_SET(a_fBit) \
-    off = iemNativeEmitIfEflagsBitSet(pReNative, off, (a_fBit)); \
+    off = iemNativeEmitIfEflagsBitSet(pReNative, off, iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                      iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 /** Emits code for IEM_MC_IF_EFL_BIT_SET. */
-DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitIfEflagsBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, unsigned iBitNo, uint64_t fLivenessEflBit)
+{
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
-
-    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
-    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);

     /* Test and jump. */

@@ … @@
 #define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) \
-    off = iemNativeEmitIfEflagsBitNotSet(pReNative, off, (a_fBit)); \
+    off = iemNativeEmitIfEflagsBitNotSet(pReNative, off, iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                         iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 /** Emits code for IEM_MC_IF_EFL_BIT_NOT_SET. */
-DECL_INLINE_THROW(uint32_t) iemNativeEmitIfEflagsBitNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitIfEflagsBitNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, unsigned iBitNo, uint64_t fLivenessEflBit)
+{
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
-
-    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
-    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);

     /* Test and jump. */

@@ … @@
-#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
-    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, false /*fInverted*/); \
+#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
+    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, false /*fInverted*/, \
+                                            iemNativeEflagsToSingleBitNo<a_fBit1>(), \
+                                            iemNativeEflagsToSingleBitNo<a_fBit2>(), \
+                                            iemNativeEflagsToLivenessMask<a_fBit1 | a_fBit2>()); \
     do {

-#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
-    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, a_fBit1, a_fBit2, true /*fInverted*/); \
+#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
+    off = iemNativeEmitIfEflagsTwoBitsEqual(pReNative, off, true /*fInverted*/, \
+                                            iemNativeEflagsToSingleBitNo<a_fBit1>(), \
+                                            iemNativeEflagsToSingleBitNo<a_fBit2>(), \
+                                            iemNativeEflagsToLivenessMask<a_fBit1 | a_fBit2>()); \
     do {

@@ … @@
 DECL_INLINE_THROW(uint32_t)
 iemNativeEmitIfEflagsTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                  uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBit1InEfl | fBit2InEfl);
+                                  bool fInverted, unsigned iBitNo1, unsigned iBitNo2, uint64_t fLivenessEflBits)
+{
+    Assert(iBitNo1 != iBitNo2);
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo1) | RT_BIT_32(iBitNo2));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);

     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
-
-    unsigned const iBitNo1 = ASMBitFirstSetU32(fBit1InEfl) - 1;
-    Assert(RT_BIT_32(iBitNo1) == fBit1InEfl);
-
-    unsigned const iBitNo2 = ASMBitFirstSetU32(fBit2InEfl) - 1;
-    Assert(RT_BIT_32(iBitNo2) == fBit2InEfl);
-    Assert(iBitNo1 != iBitNo2);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);

 #ifdef RT_ARCH_AMD64
-    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBit1InEfl);
+    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, RT_BIT_64(iBitNo1));

     off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxTmpReg, idxEflReg);

@@ … @@
 #define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
-    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, false /*fInverted*/); \
+    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, false /*fInverted*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit1>(), \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit2>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit | a_fBit1 | a_fBit2>()); \
     do {

 #define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
-    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, a_fBit, a_fBit1, a_fBit2, true /*fInverted*/); \
+    off = iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(pReNative, off, true /*fInverted*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit1>(), \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit2>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit | a_fBit1 | a_fBit2>()); \
     do {

@@ … @@
  *  IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE. */
 DECL_INLINE_THROW(uint32_t)
-iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl,
-                                              uint32_t fBit1InEfl, uint32_t fBit2InEfl, bool fInverted)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl | fBit1InEfl | fBit2InEfl);
+iemNativeEmitIfEflagsBitNotSetAndTwoBitsEqual(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool fInverted,
+                                              unsigned iBitNo, unsigned iBitNo1, unsigned iBitNo2, uint64_t fLivenessEflBits)
+{
+    Assert(iBitNo1 != iBitNo);
+    Assert(iBitNo2 != iBitNo);
+    Assert(iBitNo2 != iBitNo1);
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo) | RT_BIT_32(iBitNo1) | RT_BIT_32(iBitNo2));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
@@ … @@
     /* Get the eflags. */
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ReadOnly);
-
-    /* Translate the flag masks to bit numbers. */
-    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
-    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
-
-    unsigned const iBitNo1 = ASMBitFirstSetU32(fBit1InEfl) - 1;
-    Assert(RT_BIT_32(iBitNo1) == fBit1InEfl);
-    Assert(iBitNo1 != iBitNo);
-
-    unsigned const iBitNo2 = ASMBitFirstSetU32(fBit2InEfl) - 1;
-    Assert(RT_BIT_32(iBitNo2) == fBit2InEfl);
-    Assert(iBitNo2 != iBitNo);
-    Assert(iBitNo2 != iBitNo1);
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBits);

 #ifdef RT_ARCH_AMD64
-    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBit1InEfl); /* This must come before we jump anywhere! */
+    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, RT_BIT_64(iBitNo1)); /* This must come before we jump anywhere! */
 #elif defined(RT_ARCH_ARM64)
     uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);

@@ … @@
 #define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
-    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/); \
+    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, true /*fCheckIfSet*/, \
+                                                    iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                    iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 #define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
-    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/); \
+    off = iemNativeEmitIfCxIsNotOneAndTestEflagsBit(pReNative, off, false /*fCheckIfSet*/, \
+                                                    iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                    iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

@@ … @@
  *  IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET. */
 DECL_INLINE_THROW(uint32_t)
-iemNativeEmitIfCxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fBitInEfl, bool fCheckIfSet)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
+iemNativeEmitIfCxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                          bool fCheckIfSet, unsigned iBitNo, uint64_t fLivenessEflBit)
+{
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
@@ … @@
        register allocator state.
        Doing EFLAGS first as it's more likely to be loaded, right? */
-    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                                 kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEflBit);
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                  kIemNativeGstRegUse_ReadOnly);
@@ … @@
     /* Check the EFlags bit. */
-    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
-    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
     off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse,
                                                      !fCheckIfSet /*fJmpIfSet*/);

@@ … @@
 #define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
-    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, false /*f64Bit*/); \
+    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, true /*fCheckIfSet*/, false /*f64Bit*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 #define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
-    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, false /*f64Bit*/); \
+    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, false /*fCheckIfSet*/, false /*f64Bit*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 #define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
-    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/, true /*f64Bit*/); \
+    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, true /*fCheckIfSet*/, true /*f64Bit*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

 #define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
-    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/, true /*f64Bit*/); \
+    off = iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(pReNative, off, false /*fCheckIfSet*/, true /*f64Bit*/, \
+                                                        iemNativeEflagsToSingleBitNo<a_fBit>(), \
+                                                        iemNativeEflagsToLivenessMask<a_fBit>()); \
     do {

@@ … @@
  *  IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET. */
 DECL_INLINE_THROW(uint32_t)
-iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                              uint32_t fBitInEfl, bool fCheckIfSet, bool f64Bit)
-{
-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
+iemNativeEmitIfRcxEcxIsNotOneAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool fCheckIfSet, bool f64Bit,
+                                              unsigned iBitNo, uint64_t fLivenessEFlBit)
+
+{
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, RT_BIT_32(iBitNo));
     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
@@ … @@
        register allocator state.
        Doing EFLAGS first as it's more likely to be loaded, right? */
-    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                                 kIemNativeGstRegUse_ReadOnly);
+    uint8_t const idxEflReg    = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ReadOnly, fLivenessEFlBit);
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
                                                                  kIemNativeGstRegUse_ReadOnly);
@@ … @@
     /* Check the EFlags bit. */
-    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
-    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
     off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse,
                                                      !fCheckIfSet /*fJmpIfSet*/);
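The net effect on the conditional macros, written out by hand from the #define above (an illustrative expansion, not generated output):

// IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) now expands along these lines:
//
//   off = iemNativeEmitIfEflagsBitSet(pReNative, off,
//                                     iemNativeEflagsToSingleBitNo<X86_EFL_ZF>(),   /* == X86_EFL_ZF_BIT, a compile-time constant */
//                                     iemNativeEflagsToLivenessMask<X86_EFL_ZF>()); /* == RT_BIT_64(IEMLIVENESSBIT_IDX_EFL_ZF) */
//   do {
//
// whereas before r106113 the emitter received the raw X86_EFL_ZF mask and
// recovered the bit number at recompile time via ASMBitFirstSetU32().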
@@ … @@
 #undef  IEM_MC_FETCH_EFLAGS /* should not be used */
 #define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
-    off = iemNativeEmitFetchEFlags(pReNative, off, a_EFlags, a_fEflInput, a_fEflOutput)
+    off = iemNativeEmitFetchEFlags<a_fEflInput,  iemNativeEflagsToLivenessMask<a_fEflInput>(), \
+                                   a_fEflOutput, iemNativeEflagsToLivenessMask<a_fEflOutput>()>(pReNative, off, a_EFlags)

 /** Handles IEM_MC_FETCH_EFLAGS_EX. */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmitFetchEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags,
-                         uint32_t fEflInput, uint32_t fEflOutput)
+template<uint32_t const a_fEflInput,  uint64_t const a_fLivenessEflInput,
+         uint32_t const a_fEflOutput, uint64_t const a_fLivenessEflOutput>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitFetchEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags)
 {
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarEFlags);
     IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarEFlags, sizeof(uint32_t));
-    RT_NOREF(fEflInput, fEflOutput);
+    /** @todo fix NOT AssertCompile(a_fEflInput != 0 || a_fEflOutput != 0); */

 #ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
 # ifdef VBOX_STRICT
     if (   pReNative->idxCurCall != 0
-        && (fEflInput != 0 || fEflOutput != 0) /* for NOT these are both zero for now. */)
-    {
-        PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall - 1];
-        uint32_t const           fBoth          = fEflInput | fEflOutput;
+        && (a_fEflInput != 0 || a_fEflOutput != 0) /* for NOT these are both zero for now. */)
+    {
+        PCIEMLIVENESSENTRY const    pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall - 1];
+        RT_CONSTEXPR uint32_t const fBoth          = a_fEflInput | a_fEflOutput;
 # define ASSERT_ONE_EFL(a_fElfConst, a_idxField) \
         AssertMsg(   !(fBoth & (a_fElfConst)) \
-                  || (!(fEflInput & (a_fElfConst)) \
+                  || (!(a_fEflInput & (a_fElfConst)) \
                       ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
-                      : !(fEflOutput & (a_fElfConst)) \
+                      : !(a_fEflOutput & (a_fElfConst)) \
                       ? IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
                       : IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) ), \
@@ … @@
 #endif

-    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fEflInput);
+    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, a_fEflInput);

     /** @todo This could be prettier...*/
@@ … @@
      *        zero, but since iemNativeVarRegisterSet clears the shadowing,
      *        that's counter productive... */
-        uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                                  kIemNativeGstRegUse_ForUpdate, false /*fNoVolatileRegs*/,
-                                                                  true /** @todo EFlags shadowing+liveness weirdness (@bugref{10720}). */);
+        uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
+                                                                     a_fLivenessEflInput, a_fLivenessEflOutput);
         iemNativeVarRegisterSet(pReNative, idxVarEFlags, idxGstReg, off, true /*fAllocated*/);
     }
@@ … @@
         /* Register argument variable: Avoid assertions in generic call code and load it the traditional way. */
         uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, false /*fInitialized*/);
-        uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, &off, kIemNativeGstReg_EFlags);
+        uint8_t const idxGstReg = iemNativeRegAllocTmpForGuestEFlagsIfAlreadyPresent(pReNative, &off,
+                                                                                     a_fLivenessEflInput, a_fLivenessEflOutput);
         if (idxGstReg != UINT8_MAX)
         {

@@ … @@
 #define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
     IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
-    off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags, a_fEflOutput, true /*fUpdateSkipping*/)
+    off = iemNativeEmitCommitEFlags<true /*fUpdateSkipping*/, a_fEflOutput, \
+                                    iemNativeEflagsToLivenessMask<a_fEflInput>(), \
+                                    iemNativeEflagsToLivenessMask<a_fEflOutput>()>(pReNative, off, a_EFlags)

 #undef  IEM_MC_COMMIT_EFLAGS_OPT /* should not be used */
 #define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
     IEMNATIVE_EFLAGS_OPTIMIZATION_STATS(a_fEflInput, a_fEflOutput); \
-    off = iemNativeEmitCommitEFlags(pReNative, off, a_EFlags, a_fEflOutput, false /*fUpdateSkipping*/)
+    off = iemNativeEmitCommitEFlags<false /*fUpdateSkipping*/, a_fEflOutput, \
+                                    iemNativeEflagsToLivenessMask<a_fEflInput>(), \
+                                    iemNativeEflagsToLivenessMask<a_fEflOutput>()>(pReNative, off, a_EFlags)

 /** Handles IEM_MC_COMMIT_EFLAGS_EX. */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmitCommitEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags, uint32_t fEflOutput,
-                          bool fUpdateSkipping)
-{
-    RT_NOREF(fEflOutput);
+template<bool const a_fUpdateSkipping, uint32_t const a_fEflOutput,
+         uint64_t const a_fLivenessEflInputBits, uint64_t const a_fLivenessEflOutputBits>
+DECL_INLINE_THROW(uint32_t) iemNativeEmitCommitEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEFlags)
+{
     uint8_t const idxReg = iemNativeVarRegisterAcquire(pReNative, idxVarEFlags, &off, true /*fInitialized*/);
     IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVarEFlags, sizeof(uint32_t));
+
+#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
+# ifdef VBOX_STRICT
+    if (   pReNative->idxCurCall != 0
+        && (a_fLivenessEflInputBits != 0 || a_fLivenessEflOutputBits != 0) /* for NOT these are both zero for now. */)
+    {
+        PCIEMLIVENESSENTRY const pLivenessEntry = &pReNative->paLivenessEntries[pReNative->idxCurCall - 1];
+# define ASSERT_ONE_EFL(a_idxField) \
+        if RT_CONSTEXPR_IF(((a_fLivenessEflInputBits | a_fLivenessEflOutputBits) & RT_BIT_64(a_idxField)) != 0) \
+            AssertMsg(!(a_fLivenessEflInputBits & RT_BIT_64(a_idxField)) \
+                      ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
+                      : !(a_fLivenessEflOutputBits & RT_BIT_64(a_idxField)) \
+                      ? IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)) \
+                      : IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)), \
+                      ("%s - %u\n", #a_idxField, iemNativeLivenessGetStateByGstRegEx(pLivenessEntry, a_idxField)))
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OTHER);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_CF);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_PF);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_AF);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_ZF);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_SF);
+        ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OF);
+# undef ASSERT_ONE_EFL
+    }
+# endif
+#endif

 #ifdef VBOX_STRICT
@@ … @@
     iemNativeFixupFixedJump(pReNative, offFixup, off);

-    /** @todo validate that only bits in the fElfOutput mask changed. */
+    /** @todo validate that only bits in the a_fEflOutput mask changed. */
 #endif

 #ifdef IEMNATIVE_STRICT_EFLAGS_SKIPPING
-    if (fUpdateSkipping)
-    {
-        if ((fEflOutput & X86_EFL_STATUS_BITS) == X86_EFL_STATUS_BITS)
+    if RT_CONSTEXPR_IF(a_fUpdateSkipping)
+    {
+        if RT_CONSTEXPR_IF((a_fEflOutput & X86_EFL_STATUS_BITS) == X86_EFL_STATUS_BITS)
             off = iemNativeEmitStoreImmToVCpuU32(pReNative, off, 0, RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
         else
-            off = iemNativeEmitAndImmIntoVCpuU32(pReNative, off, ~(fEflOutput & X86_EFL_STATUS_BITS),
+            off = iemNativeEmitAndImmIntoVCpuU32(pReNative, off, ~(a_fEflOutput & X86_EFL_STATUS_BITS),
                                                  RT_UOFFSETOF(VMCPU, iem.s.fSkippingEFlags));
     }
-#else
-    RT_NOREF_PV(fUpdateSkipping);
 #endif

@@ … @@
 #define IEM_MC_SET_EFL_BIT(a_fBit) \
-    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Set>(pReNative, off, a_fBit)
+    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Set,   a_fBit, iemNativeEflagsToLivenessMask<a_fBit>()>(pReNative, off)

 #define IEM_MC_CLEAR_EFL_BIT(a_fBit) \
-    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Clear>(pReNative, off, a_fBit)
+    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Clear, a_fBit, iemNativeEflagsToLivenessMask<a_fBit>()>(pReNative, off)

 #define IEM_MC_FLIP_EFL_BIT(a_fBit) \
-    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Flip>(pReNative, off, a_fBit)
+    off = iemNativeEmitModifyEFlagsBit<kIemNativeEmitEflOp_Flip,  a_fBit, iemNativeEflagsToLivenessMask<a_fBit>()>(pReNative, off)

 /** Handles IEM_MC_SET_EFL_BIT/IEM_MC_CLEAR_EFL_BIT/IEM_MC_FLIP_EFL_BIT. */
-template<IEMNATIVEMITEFLOP const a_enmOp>
-DECL_INLINE_THROW(uint32_t) iemNativeEmitModifyEFlagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fEflBit)
-{
-    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
-                                                              kIemNativeGstRegUse_ForUpdate, false /*fNoVolatileRegs*/,
-                                                              true /*fSkipLivenessAssert*/); /** @todo proper liveness / eflags fix */
+template<IEMNATIVEMITEFLOP const a_enmOp, uint32_t const a_fEflBit, uint64_t const a_fLivenessEflBit>
+DECL_INLINE_THROW(uint32_t) iemNativeEmitModifyEFlagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+{
+    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestEFlags(pReNative, &off, kIemNativeGstRegUse_ForUpdate,
+                                                                 a_enmOp == kIemNativeEmitEflOp_Flip ? a_fLivenessEflBit : 0,
+                                                                 a_fLivenessEflBit);

     /* Using 'if constexpr' forces code elimination in debug builds with VC. */
     if RT_CONSTEXPR_IF(a_enmOp == kIemNativeEmitEflOp_Set)
-        off = iemNativeEmitOrGpr32ByImm(pReNative, off, idxEflReg, fEflBit);
+        off = iemNativeEmitOrGpr32ByImm(pReNative, off, idxEflReg, a_fEflBit);
     else if RT_CONSTEXPR_IF(a_enmOp == kIemNativeEmitEflOp_Clear)
-        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxEflReg, ~fEflBit);
+        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxEflReg, ~a_fEflBit);
     else if RT_CONSTEXPR_IF(a_enmOp == kIemNativeEmitEflOp_Flip)
-        off = iemNativeEmitXorGpr32ByImm(pReNative, off, idxEflReg, fEflBit);
+        off = iemNativeEmitXorGpr32ByImm(pReNative, off, idxEflReg, a_fEflBit);
     else
         AssertCompile(   a_enmOp == kIemNativeEmitEflOp_Set /* AssertCompile(false) works with VC 2019 but not clang 15. */
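The rewritten iemNativeEmitModifyEFlagsBit() leans on RT_CONSTEXPR_IF ('if constexpr' where available) so each template instantiation keeps exactly one emitter call. A standalone illustration of that dispatch pattern (plain C++17 with simplified stand-in names; not VBox code):

#include <cstdint>
#include <cstdio>

enum class Op { Set, Clear, Flip };

template<Op a_enmOp, uint32_t a_fBit>
uint32_t modifyBit(uint32_t fEfl)
{
    // Untaken branches are discarded at compile time, so the instantiated
    // function body contains only one of the three operations.
    if constexpr (a_enmOp == Op::Set)
        return fEfl | a_fBit;
    else if constexpr (a_enmOp == Op::Clear)
        return fEfl & ~a_fBit;
    else
        return fEfl ^ a_fBit;   // Op::Flip
}

int main()
{
    std::printf("%#x\n", (unsigned)modifyBit<Op::Set,  0x40u>(0));     // prints 0x40
    std::printf("%#x\n", (unsigned)modifyBit<Op::Flip, 0x40u>(0x40u)); // prints 0
    return 0;
}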
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
(diff against r106099)

@@ … @@
 /**
- * Allocates a temporary host general purpose register for keeping a guest
- * register value.
- *
- * Since we may already have a register holding the guest register value,
- * code will be emitted to do the loading if that's not the case. Code may also
- * be emitted if we have to free up a register to satify the request.
- *
- * @returns The host register number; throws VBox status code on failure, so no
- *          need to check the return value.
- * @param   pReNative       The native recompile state.
- * @param   poff            Pointer to the variable with the code buffer
- *                          position. This will be update if we need to move a
- *                          variable from register to stack in order to satisfy
- *                          the request.
- * @param   enmGstReg       The guest register that will is to be updated.
- * @param   enmIntendedUse  How the caller will be using the host register.
- * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
- *                          register is okay (default).  The ASSUMPTION here is
- *                          that the caller has already flushed all volatile
- *                          registers, so this is only applied if we allocate a
- *                          new register.
- * @param   fSkipLivenessAssert  Hack for liveness input validation of EFLAGS.
- * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
- */
-DECL_HIDDEN_THROW(uint8_t)
-iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
-                                IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
-                                bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
+ * Common worker for iemNativeRegAllocTmpForGuestReg() and
+ * iemNativeRegAllocTmpForGuestEFlags().
+ *
+ * See iemNativeRegAllocTmpForGuestReg() for details.
+ */
+static uint8_t
+iemNativeRegAllocTmpForGuestRegCommon(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
+                                      IEMNATIVEGSTREGUSE enmIntendedUse, bool fNoVolatileRegs)
 {
     Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
-#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
-    AssertMsg(   fSkipLivenessAssert
-              || pReNative->idxCurCall == 0
-              || enmGstReg == kIemNativeGstReg_Pc
-              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
-                  ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
-                  : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
-                  ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
-                  : IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
-              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
-#endif
-    RT_NOREF(fSkipLivenessAssert);
 #if defined(LOG_ENABLED) || defined(VBOX_STRICT)
     static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };

@@ … @@
 /**
- * Allocates a temporary host general purpose register that already holds the
- * given guest register value.
- *
- * The use case for this function is places where the shadowing state cannot be
- * modified due to branching and such.  This will fail if the we don't have a
- * current shadow copy handy or if it's incompatible.  The only code that will
- * be emitted here is value checking code in strict builds.
- *
- * The intended use can only be readonly!
- *
- * @returns The host register number, UINT8_MAX if not present.
+ * Allocates a temporary host general purpose register for keeping a guest
+ * register value.
+ *
+ * Since we may already have a register holding the guest register value,
+ * code will be emitted to do the loading if that's not the case. Code may also
+ * be emitted if we have to free up a register to satify the request.
+ *
+ * @returns The host register number; throws VBox status code on failure, so no
+ *          need to check the return value.
  * @param   pReNative   The native recompile state.
- * @param   poff        Pointer to the instruction buffer offset.
- *                      Will be updated in strict builds if a register is
- *                      found.
+ * @param   poff        Pointer to the variable with the code buffer
+ *                      position. This will be update if we need to move a
+ *                      variable from register to stack in order to satisfy
+ *                      the request.
  * @param   enmGstReg   The guest register that will is to be updated.
- * @note    In strict builds, this may throw instruction buffer growth failures.
- *          Non-strict builds will not throw anything.
- * @sa      iemNativeRegAllocTmpForGuestReg
+ * @param   enmIntendedUse  How the caller will be using the host register.
+ * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
+ *                      register is okay (default).  The ASSUMPTION here is
+ *                      that the caller has already flushed all volatile
+ *                      registers, so this is only applied if we allocate a
+ *                      new register.
+ * @param   fSkipLivenessAssert  Hack for liveness input validation of EFLAGS.
+ * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
  */
 DECL_HIDDEN_THROW(uint8_t)
-iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
+                                IEMNATIVEGSTREGUSE enmIntendedUse /*= kIemNativeGstRegUse_ReadOnly*/,
+                                bool fNoVolatileRegs /*= false*/, bool fSkipLivenessAssert /*= false*/)
+{
+#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
+    AssertMsg(   fSkipLivenessAssert
+              || pReNative->idxCurCall == 0
+              || enmGstReg == kIemNativeGstReg_Pc
+              || (enmIntendedUse == kIemNativeGstRegUse_ForFullWrite
+                  ? IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
+                  : enmIntendedUse == kIemNativeGstRegUse_ForUpdate
+                  ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED( iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
+                  : IEMLIVENESS_STATE_IS_INPUT_EXPECTED(  iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)) ),
+              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
+#endif
+    RT_NOREF(fSkipLivenessAssert);
+
+    return iemNativeRegAllocTmpForGuestRegCommon(pReNative, poff, enmGstReg, enmIntendedUse, fNoVolatileRegs);
+}
+
+
+#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && defined(VBOX_STRICT)
+/**
+ * Specialized version of iemNativeRegAllocTmpForGuestReg for EFLAGS.
+ *
+ * This takes additional arguments for covering liveness assertions in strict
+ * builds, it's otherwise the same as iemNativeRegAllocTmpForGuestReg() with
+ * kIemNativeGstReg_EFlags as argument.
+ */
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREGUSE enmIntendedUse,
+                                   uint64_t fRead, uint64_t fWrite /*= 0*/, uint64_t fPotentialCall /*= 0*/)
+{
+    if (pReNative->idxCurCall != 0 && (fRead || fWrite /*|| fPotentialCall*/))
+    {
+        Assert(!(fRead          & ~IEMLIVENESSBIT_ALL_EFL_MASK));
+        Assert(!(fWrite         & ~IEMLIVENESSBIT_ALL_EFL_MASK));
+        Assert(!(fPotentialCall & ~IEMLIVENESSBIT_ALL_EFL_MASK));
+        uint64_t const fAll = fRead | fWrite /*| fPotentialCall*/;
+        uint32_t fState;
+# define MY_ASSERT_ONE_EFL(a_enmGstEfl) \
+        fState = iemNativeLivenessGetPrevStateByGstRegEx(pReNative, (IEMNATIVEGSTREG)(a_enmGstEfl)); \
+        AssertMsg(   !(fAll & RT_BIT_64(a_enmGstEfl)) \
+                  || (  fRead & RT_BIT_64(a_enmGstEfl) \
+                      ?    fWrite & RT_BIT_64(a_enmGstEfl) \
+                         ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED(fState) \
+                         : IEMLIVENESS_STATE_IS_INPUT_EXPECTED(fState) \
+                      :   IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(fState) \
+                     ) \
+                  , ("%s - %u\n", #a_enmGstEfl, fState))
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OTHER);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_CF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_PF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_AF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_ZF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_SF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OF);
+# undef MY_ASSERT_ONE_EFL
+    }
+    RT_NOREF(fPotentialCall);
+    return iemNativeRegAllocTmpForGuestRegCommon(pReNative, poff, kIemNativeGstReg_EFlags,
+                                                 enmIntendedUse, false /*fNoVolatileRegs*/);
+}
+#endif
+
+
+
+/**
+ * Common worker for iemNativeRegAllocTmpForGuestRegIfAlreadyPresent and
+ * iemNativeRegAllocTmpForGuestEFlagsIfAlreadyPresent.
+ *
+ * See iemNativeRegAllocTmpForGuestRegIfAlreadyPresent() for details.
+ */
+DECL_FORCE_INLINE(uint8_t)
+iemNativeRegAllocTmpForGuestRegIfAlreadyPresentCommon(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
 {
     Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
-#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
-    AssertMsg(   pReNative->idxCurCall == 0
-              || IEMLIVENESS_STATE_IS_INPUT_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
-              || enmGstReg == kIemNativeGstReg_Pc
-              || enmGstReg == kIemNativeGstReg_EFlags /** @todo EFlags shadowing+liveness is weird and needs fixing (@bugref{10720}) */,
-              ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
-#endif

     /*
@@ … @@
     return UINT8_MAX;
 }
+
+
+/**
+ * Allocates a temporary host general purpose register that already holds the
+ * given guest register value.
+ *
+ * The use case for this function is places where the shadowing state cannot be
+ * modified due to branching and such.  This will fail if the we don't have a
+ * current shadow copy handy or if it's incompatible.  The only code that will
+ * be emitted here is value checking code in strict builds.
+ *
+ * The intended use can only be readonly!
+ *
+ * @returns The host register number, UINT8_MAX if not present.
+ * @param   pReNative   The native recompile state.
+ * @param   poff        Pointer to the instruction buffer offset.
+ *                      Will be updated in strict builds if a register is
+ *                      found.
+ * @param   enmGstReg   The guest register that will is to be updated.
+ * @note    In strict builds, this may throw instruction buffer growth failures.
+ *          Non-strict builds will not throw anything.
+ * @sa      iemNativeRegAllocTmpForGuestReg
+ */
+DECL_HIDDEN_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg)
+{
+#ifdef IEMNATIVE_WITH_LIVENESS_ANALYSIS
+    AssertMsg(   pReNative->idxCurCall == 0
+              || IEMLIVENESS_STATE_IS_INPUT_EXPECTED(iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg))
+              || enmGstReg == kIemNativeGstReg_Pc
+              , ("%s - %u\n", g_aGstShadowInfo[enmGstReg].pszName, iemNativeLivenessGetPrevStateByGstReg(pReNative, enmGstReg)));
+#endif
+    return iemNativeRegAllocTmpForGuestRegIfAlreadyPresentCommon(pReNative, poff, enmGstReg);
+}
+
+
+#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && defined(VBOX_STRICT)
+/**
+ * Specialized version of iemNativeRegAllocTmpForGuestRegIfAlreadyPresent for
+ * EFLAGS.
+ *
+ * This takes additional arguments for covering liveness assertions in strict
+ * builds, it's otherwise the same as
+ * iemNativeRegAllocTmpForGuestRegIfAlreadyPresent() with
+ * kIemNativeGstReg_EFlags as argument.
+ *
+ * @note The @a fWrite parameter is necessary to complete the liveness picture,
+ *       as iemNativeEmitFetchEFlags() may fetch flags in prep for a later
+ *       commit.  It the operation clobbers all the flags, @a fRead will be
+ *       zero, so better verify the whole picture while we're here.
+ */
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestEFlagsIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                                              uint64_t fRead, uint64_t fWrite /*=0*/)
+{
+    if (pReNative->idxCurCall != 0)
+    {
+        Assert(fRead | fWrite);
+        Assert(!(fRead  & ~IEMLIVENESSBIT_ALL_EFL_MASK));
+        Assert(!(fWrite & ~IEMLIVENESSBIT_ALL_EFL_MASK));
+        uint64_t const fAll = fRead | fWrite;
+        uint32_t fState;
+# define MY_ASSERT_ONE_EFL(a_enmGstEfl) \
+        fState = iemNativeLivenessGetPrevStateByGstRegEx(pReNative, (IEMNATIVEGSTREG)(a_enmGstEfl)); \
+        AssertMsg(   !(fAll & RT_BIT_64(a_enmGstEfl)) \
+                  || (  fRead & RT_BIT_64(a_enmGstEfl) \
+                      ?    fWrite & RT_BIT_64(a_enmGstEfl) \
+                         ? IEMLIVENESS_STATE_IS_MODIFY_EXPECTED(fState) \
+                         : IEMLIVENESS_STATE_IS_INPUT_EXPECTED(fState) \
+                      :   IEMLIVENESS_STATE_IS_CLOBBER_EXPECTED(fState) \
+                     ) \
+                  , ("%s - %u\n", #a_enmGstEfl, fState))
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OTHER);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_CF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_PF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_AF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_ZF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_SF);
+        MY_ASSERT_ONE_EFL(IEMLIVENESSBIT_IDX_EFL_OF);
+# undef MY_ASSERT_ONE_EFL
+    }
+    RT_NOREF(fRead);
+    return iemNativeRegAllocTmpForGuestRegIfAlreadyPresentCommon(pReNative, poff, kIemNativeGstReg_EFlags);
+}
+#endif
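The MY_ASSERT_ONE_EFL logic encodes a simple per-flag expectation table: read and written means a MODIFY state is expected, read-only means INPUT, write-only means CLOBBER. A standalone restatement of that table (simplified stand-ins, not VBox code):

#include <cstdint>

enum class Expect { None, Input, Clobber, Modify };

constexpr Expect expectedState(uint64_t fRead, uint64_t fWrite, uint64_t fFlagBit)
{
    if (!((fRead | fWrite) & fFlagBit))
        return Expect::None;                        // flag not touched: no constraint
    if (fRead & fFlagBit)
        return fWrite & fFlagBit ? Expect::Modify   // read and written
                                 : Expect::Input;   // only read
    return Expect::Clobber;                         // only written
}

constexpr uint64_t fCF = UINT64_C(1) << 0;          // stand-in liveness bit
static_assert(expectedState(fCF, fCF, fCF) == Expect::Modify,  "read+write");
static_assert(expectedState(fCF, 0,   fCF) == Expect::Input,   "read-only");
static_assert(expectedState(0,   fCF, fCF) == Expect::Clobber, "write-only");

int main() { return 0; }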
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
(diff against r106101)

@@ … @@
                                                        IEMNATIVEGSTREGUSE enmIntendedUse = kIemNativeGstRegUse_ReadOnly,
                                                        bool fNoVolatileRegs = false, bool fSkipLivenessAssert = false);
+#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && defined(VBOX_STRICT)
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                              IEMNATIVEGSTREGUSE enmIntendedUse, uint64_t fRead,
+                                                              uint64_t fWrite = 0, uint64_t fPotentialCall = 0);
+#else
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlags(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREGUSE enmIntendedUse,
+                                   uint64_t fRead, uint64_t fWrite = 0, uint64_t fPotentialCall = 0)
+{
+    RT_NOREF(fRead, fWrite, fPotentialCall);
+    return iemNativeRegAllocTmpForGuestReg(pReNative, poff, kIemNativeGstReg_EFlags, enmIntendedUse);
+}
+#endif
+
 DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
                                                                            IEMNATIVEGSTREG enmGstReg);
+#if defined(IEMNATIVE_WITH_LIVENESS_ANALYSIS) && defined(VBOX_STRICT)
+DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestEFlagsIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                                              uint64_t fRead, uint64_t fWrite = 0);
+#else
+DECL_FORCE_INLINE_THROW(uint8_t)
+iemNativeRegAllocTmpForGuestEFlagsIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
+                                                   uint64_t fRead, uint64_t fWrite = 0)
+{
+    RT_NOREF(fRead, fWrite);
+    return iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(pReNative, poff, kIemNativeGstReg_EFlags);
+}
+#endif

 DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs);

@@ … @@
 }

-
 # ifdef VBOX_STRICT
+
 /** For assertions only - caller checks that idxCurCall isn't zero. */
 DECL_FORCE_INLINE(uint32_t)
 iemNativeLivenessGetPrevStateByGstReg(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg)
 {
     return iemNativeLivenessGetStateByGstReg(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], enmGstReg);
 }
+
+
+/** For assertions only - caller checks that idxCurCall isn't zero. */
+DECL_FORCE_INLINE(uint32_t)
+iemNativeLivenessGetPrevStateByGstRegEx(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg)
+{
+    return iemNativeLivenessGetStateByGstRegEx(&pReNative->paLivenessEntries[pReNative->idxCurCall - 1], enmGstReg);
+}
+
 # endif /* VBOX_STRICT */
-
 #endif /* IEMNATIVE_WITH_LIVENESS_ANALYSIS */
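The header thus keeps both build flavours source-compatible: strict liveness builds get real validating functions, all other builds get force-inlined forwarders that discard the masks. A minimal standalone sketch of that pattern (stand-in names; not VBox code):

#include <cstdint>

enum GstReg { GstReg_EFlags = 42 };                   // stand-in for kIemNativeGstReg_EFlags

static uint8_t allocTmpForGuestReg(GstReg enmGstReg)  // stand-in for the generic allocator
{
    return static_cast<uint8_t>(enmGstReg);
}

// Non-strict flavour: the masks exist only so call sites compile unchanged;
// a strict build would instead declare an out-of-line function that asserts
// on fRead/fWrite before delegating to the same common worker.
static inline uint8_t allocTmpForGuestEFlags(uint64_t fRead, uint64_t fWrite = 0)
{
    (void)fRead; (void)fWrite;                        // only consumed by the strict variant
    return allocTmpForGuestReg(GstReg_EFlags);
}

int main()
{
    return allocTmpForGuestEFlags(UINT64_C(1) << 3) == 42 ? 0 : 1;
}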