Changeset 36834 in vbox
- Timestamp: Apr 24, 2011 11:04:52 PM
- svn:sync-xref-src-repo-rev: 71393
- Location: trunk/src/VBox/VMM
- Files: 2 edited, 1 copied
trunk/src/VBox/VMM/Makefile.kmk
r36824   r36834
   592      592          IEMInternal.o \
   593      593          IEMAllInstructions.cpp.o IEMAllInstructions.cpp.obj \
            594  +       IEMAllCImpl.cpp.o IEMAllCImpl.cpp.obj \
   594      595          IEMAllCImplStrInstr.cpp.o IEMAllCImplStrInstr.cpp.obj: IEMAll.o
   595      596
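A side note on the added rule: it presumably exists because the IEMAllCImpl code is not compiled on its own but textually included into IEMAll.cpp, so asking kmk for any of the listed per-file objects has to build IEMAll.o instead. A minimal sketch of that inclusion pattern (the exact include set is an assumption, not shown in this changeset):

    /* IEMAll.cpp -- assumed overall structure, for illustration only. */
    #include "IEMInternal.h"                 /* IEMCPU and the other internal types. */
    /* ... the core interpreter, decoder and memory helpers ... */
    #include "IEMAllInstructions.cpp.h"      /* Instruction decoding.                */
    #include "IEMAllCImpl.cpp.h"             /* C implementations (per the new rule).*/
    #include "IEMAllCImplStrInstr.cpp.h"     /* String instruction workers.          */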
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r36833 -> r36834)

Removed from IEMAll.cpp by this changeset:
/** @name Misc Helpers
 * @{
 */

/**
 * Checks if we are allowed to access the given I/O port, raising the
 * appropriate exceptions if we aren't (or if the I/O bitmap is not
 * accessible).
 *
 * @returns Strict VBox status code.
 *
 * @param   pIemCpu             The IEM per CPU data.
 * @param   pCtx                The register context.
 * @param   u16Port             The port number.
 * @param   cbOperand           The operand size.
 */
DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
    if (   (pCtx->cr0 & X86_CR0_PE)
        && (   pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
            || pCtx->eflags.Bits.u1VM) )
    {
        /** @todo I/O port permission bitmap check */
        AssertFailedReturn(VERR_NOT_IMPLEMENTED);
    }
    return VINF_SUCCESS;
}

/** @} */
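/*
 * Illustrative sketch, not part of this changeset: roughly what the I/O
 * permission bitmap check marked @todo above would have to do.  The TSS
 * layout (I/O map base word at offset 0x66) is architectural, but
 * iemMemFetchSysU16 is a hypothetical stand-in for whatever fetch helper
 * ends up being used, and TSS limit checking is omitted for brevity.
 */
DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx,
                                                           uint16_t u16Port, uint8_t cbOperand)
{
    /* Fetch the I/O bitmap offset from the 32-bit TSS. */
    uint16_t offIoBitmap;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pIemCpu, &offIoBitmap, UINT8_MAX, pCtx->trHid.u64Base + 0x66);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* One bit per port; an access of cbOperand bytes tests cbOperand
       consecutive bits, which may straddle a byte boundary, so read two bytes. */
    uint16_t bmBytes;
    rcStrict = iemMemFetchSysU16(pIemCpu, &bmBytes, UINT8_MAX,
                                 pCtx->trHid.u64Base + offIoBitmap + u16Port / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint16_t const fPortMask = (uint16_t)(((1 << cbOperand) - 1) << (u16Port & 7));
    if (bmBytes & fPortMask) /* any set bit denies the access */
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    return VINF_SUCCESS;
}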
/** @name C Implementations
 * @{
 */

/**
 * Implements a 16-bit popa.
 */
IEM_CIMPL_DEF_0(iemCImpl_popa_16)
{
    PCPUMCTX        pCtx        = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR         GCPtrStart  = iemRegGetEffRsp(pCtx);
    RTGCPTR         GCPtrLast   = GCPtrStart + 15;
    VBOXSTRICTRC    rcStrict;

    /*
     * The docs are a bit hard to comprehend here, but it looks like we wrap
     * around in real mode as long as none of the individual "popa" crosses the
     * end of the stack segment.  In protected mode we check the whole access
     * in one go.  For efficiency, only do the word-by-word thing if we're in
     * danger of wrapping around.
     */
    /** @todo do popa boundary / wrap-around checks. */
    if (RT_UNLIKELY(   IEM_IS_REAL_OR_V86_MODE(pIemCpu)
                    && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
    {
        /* word-by-word */
        RTUINT64U TmpRsp;
        TmpRsp.u = pCtx->rsp;
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
            iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
        }
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
            pCtx->rsp = TmpRsp.u;
            iemRegAddToRip(pIemCpu, cbInstr);
        }
    }
    else
    {
        uint16_t const *pa16Mem = NULL;
        rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
        if (rcStrict == VINF_SUCCESS)
        {
            pCtx->di = pa16Mem[7 - X86_GREG_xDI];
            pCtx->si = pa16Mem[7 - X86_GREG_xSI];
            pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
            /* skip sp */
            pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
            pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
            pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
            pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
            rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
            if (rcStrict == VINF_SUCCESS)
            {
                iemRegAddToRsp(pCtx, 16);
                iemRegAddToRip(pIemCpu, cbInstr);
            }
        }
    }
    return rcStrict;
}


/**
 * Implements a 32-bit popa.
 */
IEM_CIMPL_DEF_0(iemCImpl_popa_32)
{
    PCPUMCTX        pCtx        = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR         GCPtrStart  = iemRegGetEffRsp(pCtx);
    RTGCPTR         GCPtrLast   = GCPtrStart + 31;
    VBOXSTRICTRC    rcStrict;

    /*
     * The docs are a bit hard to comprehend here, but it looks like we wrap
     * around in real mode as long as none of the individual "popa" crosses the
     * end of the stack segment.  In protected mode we check the whole access
     * in one go.  For efficiency, only do the word-by-word thing if we're in
     * danger of wrapping around.
     */
    /** @todo do popa boundary / wrap-around checks. */
    if (RT_UNLIKELY(   IEM_IS_REAL_OR_V86_MODE(pIemCpu)
                    && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
    {
        /* word-by-word */
        RTUINT64U TmpRsp;
        TmpRsp.u = pCtx->rsp;
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
            iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
        }
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
#if 1  /** @todo what actually happens with the high bits when we're in 16-bit mode? */
            pCtx->rdi &= UINT32_MAX;
            pCtx->rsi &= UINT32_MAX;
            pCtx->rbp &= UINT32_MAX;
            pCtx->rbx &= UINT32_MAX;
            pCtx->rdx &= UINT32_MAX;
            pCtx->rcx &= UINT32_MAX;
            pCtx->rax &= UINT32_MAX;
#endif
            pCtx->rsp = TmpRsp.u;
            iemRegAddToRip(pIemCpu, cbInstr);
        }
    }
    else
    {
        uint32_t const *pa32Mem;
        rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
        if (rcStrict == VINF_SUCCESS)
        {
            pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
            pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
            pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
            /* skip esp */
            pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
            pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
            pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
            pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
            rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
            if (rcStrict == VINF_SUCCESS)
            {
                iemRegAddToRsp(pCtx, 32);
                iemRegAddToRip(pIemCpu, cbInstr);
            }
        }
    }
    return rcStrict;
}
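/*
 * Illustrative note, not part of this changeset: the pa16Mem[7 - X86_GREG_xXX]
 * indexing used above flattens into the architectural PUSHA/POPA frame.  With
 * X86_GREG_xAX = 0 through X86_GREG_xDI = 7, the 16-bit frame works out to
 * (lowest stack address first):
 */
typedef struct PUSHA16FRAMESKETCH /* hypothetical type, for illustration only */
{
    uint16_t di;    /* [0] == 7 - X86_GREG_xDI: pushed last, lowest address.   */
    uint16_t si;    /* [1] == 7 - X86_GREG_xSI                                 */
    uint16_t bp;    /* [2] == 7 - X86_GREG_xBP                                 */
    uint16_t sp;    /* [3] == 7 - X86_GREG_xSP: original SP; skipped by POPA.  */
    uint16_t bx;    /* [4] == 7 - X86_GREG_xBX                                 */
    uint16_t dx;    /* [5] == 7 - X86_GREG_xDX                                 */
    uint16_t cx;    /* [6] == 7 - X86_GREG_xCX                                 */
    uint16_t ax;    /* [7] == 7 - X86_GREG_xAX: pushed first, highest address. */
} PUSHA16FRAMESKETCH;
AssertCompile(sizeof(PUSHA16FRAMESKETCH) == 16);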
/**
 * Implements a 16-bit pusha.
 */
IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
{
    PCPUMCTX        pCtx        = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR         GCPtrTop    = iemRegGetEffRsp(pCtx);
    RTGCPTR         GCPtrBottom = GCPtrTop - 15;
    VBOXSTRICTRC    rcStrict;

    /*
     * The docs are a bit hard to comprehend here, but it looks like we wrap
     * around in real mode as long as none of the individual "pusha" crosses the
     * end of the stack segment.  In protected mode we check the whole access
     * in one go.  For efficiency, only do the word-by-word thing if we're in
     * danger of wrapping around.
     */
    /** @todo do pusha boundary / wrap-around checks. */
    if (RT_UNLIKELY(   GCPtrBottom > GCPtrTop
                    && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
    {
        /* word-by-word */
        RTUINT64U TmpRsp;
        TmpRsp.u = pCtx->rsp;
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
            pCtx->rsp = TmpRsp.u;
            iemRegAddToRip(pIemCpu, cbInstr);
        }
    }
    else
    {
        GCPtrBottom--;
        uint16_t *pa16Mem = NULL;
        rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
        if (rcStrict == VINF_SUCCESS)
        {
            pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
            pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
            pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
            pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
            pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
            pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
            pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
            pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
            rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
            if (rcStrict == VINF_SUCCESS)
            {
                iemRegSubFromRsp(pCtx, 16);
                iemRegAddToRip(pIemCpu, cbInstr);
            }
        }
    }
    return rcStrict;
}
/**
 * Implements a 32-bit pusha.
 */
IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
{
    PCPUMCTX        pCtx        = pIemCpu->CTX_SUFF(pCtx);
    RTGCPTR         GCPtrTop    = iemRegGetEffRsp(pCtx);
    RTGCPTR         GCPtrBottom = GCPtrTop - 31;
    VBOXSTRICTRC    rcStrict;

    /*
     * The docs are a bit hard to comprehend here, but it looks like we wrap
     * around in real mode as long as none of the individual "pusha" crosses the
     * end of the stack segment.  In protected mode we check the whole access
     * in one go.  For efficiency, only do the word-by-word thing if we're in
     * danger of wrapping around.
     */
    /** @todo do pusha boundary / wrap-around checks. */
    if (RT_UNLIKELY(   GCPtrBottom > GCPtrTop
                    && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
    {
        /* word-by-word */
        RTUINT64U TmpRsp;
        TmpRsp.u = pCtx->rsp;
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
        if (rcStrict == VINF_SUCCESS)
        {
            pCtx->rsp = TmpRsp.u;
            iemRegAddToRip(pIemCpu, cbInstr);
        }
    }
    else
    {
        GCPtrBottom--;
        uint32_t *pa32Mem;
        rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
        if (rcStrict == VINF_SUCCESS)
        {
            pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
            pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
            pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
            pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
            pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
            pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
            pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
            pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
            rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
            if (rcStrict == VINF_SUCCESS)
            {
                iemRegSubFromRsp(pCtx, 32);
                iemRegAddToRip(pIemCpu, cbInstr);
            }
        }
    }
    return rcStrict;
}


/**
 * Implements pushf.
 *
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * If we're in V8086 mode some care is required (which is why we're doing
     * this in a C implementation).
     */
    uint32_t fEfl = pCtx->eflags.u;
    if (   (fEfl & X86_EFL_VM)
        && X86_EFL_GET_IOPL(fEfl) != 3 )
    {
        Assert(pCtx->cr0 & X86_CR0_PE);
        if (   enmEffOpSize != IEMMODE_16BIT
            || !(pCtx->cr4 & X86_CR4_VME))
            return iemRaiseGeneralProtectionFault0(pIemCpu);
        fEfl &= ~X86_EFL_IF;        /* (RF and VM are out of range) */
        fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
        return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
    }

    /*
     * Ok, clear RF and VM and push the flags.
     */
    fEfl &= ~(X86_EFL_RF | X86_EFL_VM);

    VBOXSTRICTRC rcStrict;
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
            rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
            break;
        case IEMMODE_32BIT:
            rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
            break;
        case IEMMODE_64BIT:
            rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
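/*
 * Illustrative note, not part of this changeset: the magic-looking
 * "<< (19 - 9)" / ">> (19 - 9)" shifts in pushf above (and popf below) just
 * move a flag between bit 9 (IF) and bit 19 (VIF) for the CR4.VME case:
 */
AssertCompile(X86_EFL_IF  == RT_BIT_32(9));
AssertCompile(X86_EFL_VIF == RT_BIT_32(19));
/* So (fEfl & X86_EFL_VIF) >> (19 - 9) presents VIF as IF to the V8086 guest,
   and (fEflNew & X86_EFL_IF) << (19 - 9) folds a popped IF back into VIF. */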
/**
 * Implements popf.
 *
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
{
    PCPUMCTX        pCtx    = pIemCpu->CTX_SUFF(pCtx);
    uint32_t const  fEflOld = pCtx->eflags.u;
    VBOXSTRICTRC    rcStrict;
    uint32_t        fEflNew;

    /*
     * V8086 is special as usual.
     */
    if (fEflOld & X86_EFL_VM)
    {
        /*
         * Almost anything goes if IOPL is 3.
         */
        if (X86_EFL_GET_IOPL(fEflOld) == 3)
        {
            switch (enmEffOpSize)
            {
                case IEMMODE_16BIT:
                {
                    uint16_t u16Value;
                    rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
                    if (rcStrict != VINF_SUCCESS)
                        return rcStrict;
                    fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
                    break;
                }
                case IEMMODE_32BIT:
                    rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
                    if (rcStrict != VINF_SUCCESS)
                        return rcStrict;
                    break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

            fEflNew &=   X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
        }
        /*
         * Interrupt flag virtualization with CR4.VME=1.
         */
        else if (   enmEffOpSize == IEMMODE_16BIT
                 && (pCtx->cr4 & X86_CR4_VME) )
        {
            uint16_t    u16Value;
            RTUINT64U   TmpRsp;
            TmpRsp.u = pCtx->rsp;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
             *        or before? */
            if (    (   (u16Value & X86_EFL_IF)
                     && (fEflOld  & X86_EFL_VIP))
                ||  (u16Value & X86_EFL_TF) )
                return iemRaiseGeneralProtectionFault0(pIemCpu);

            fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
            fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
            fEflNew &=   X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;

            pCtx->rsp = TmpRsp.u;
        }
        else
            return iemRaiseGeneralProtectionFault0(pIemCpu);

    }
    /*
     * Not in V8086 mode.
     */
    else
    {
        /* Pop the flags. */
        switch (enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Value;
                rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
                if (rcStrict != VINF_SUCCESS)
                    return rcStrict;
                fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
                break;
            }
            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
                rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
                if (rcStrict != VINF_SUCCESS)
                    return rcStrict;
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
        /* Merge them with the current flags. */
        if (   (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
            || pIemCpu->uCpl == 0)
        {
            fEflNew &=  X86_EFL_POPF_BITS;
            fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
        }
        else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
        {
            fEflNew &=   X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
        }
        else
        {
            fEflNew &=   X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
        }
    }

    /*
     * Commit the flags.
     */
    Assert(fEflNew & RT_BIT_32(1));
    pCtx->eflags.u = fEflNew;
    iemRegAddToRip(pIemCpu, cbInstr);

    return VINF_SUCCESS;
}


/**
 * Implements an indirect call.
 *
 * @param   uNewPC      The new program counter (RIP) value (loaded from the
 *                      operand).
 */
IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint16_t uOldPC = pCtx->ip + cbInstr;
    if (uNewPC > pCtx->csHid.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}


/**
 * Implements a 16-bit relative call.
 *
 * @param   offDisp     The displacement offset.
 */
IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint16_t uOldPC = pCtx->ip + cbInstr;
    uint16_t uNewPC = uOldPC + offDisp;
    if (uNewPC > pCtx->csHid.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}


/**
 * Implements a 32-bit indirect call.
 *
 * @param   uNewPC      The new program counter (RIP) value (loaded from the
 *                      operand).
 */
IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint32_t uOldPC = pCtx->eip + cbInstr;
    if (uNewPC > pCtx->csHid.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}


/**
 * Implements a 32-bit relative call.
 *
 * @param   offDisp     The displacement offset.
 */
IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint32_t uOldPC = pCtx->eip + cbInstr;
    uint32_t uNewPC = uOldPC + offDisp;
    if (uNewPC > pCtx->csHid.u32Limit)
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}
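/*
 * Illustrative note, not part of this changeset: because uNewPC is computed
 * in uint16_t above, a 16-bit relative call wraps around at 64K before the
 * CS limit check is made, e.g.:
 */
#if 0 /* worked example only */
{
    uint16_t uOldPC  = 0xfff0;              /* ip + cbInstr near the segment end. */
    int16_t  offDisp = 0x20;
    uint16_t uNewPC  = uOldPC + offDisp;    /* wraps to 0x0010 ...                */
    Assert(uNewPC == 0x0010);               /* ... and so passes the limit check. */
}
#endif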
/**
 * Implements a 64-bit indirect call.
 *
 * @param   uNewPC      The new program counter (RIP) value (loaded from the
 *                      operand).
 */
IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t uOldPC = pCtx->rip + cbInstr;
    if (!IEM_IS_CANONICAL(uNewPC))
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}


/**
 * Implements a 64-bit relative call.
 *
 * @param   offDisp     The displacement offset.
 */
IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t uOldPC = pCtx->rip + cbInstr;
    uint64_t uNewPC = uOldPC + offDisp;
    if (!IEM_IS_CANONICAL(uNewPC))
        return iemRaiseNotCanonical(pIemCpu);

    VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}
/**
 * Implements far jumps.
 *
 * @param   uSel        The selector.
 * @param   offSeg      The segment offset.
 */
IEM_CIMPL_DEF_2(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Real mode and V8086 mode are easy.  The only snag seems to be that
     * CS.limit doesn't change and the limit check is done against the current
     * limit.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
    {
        if (offSeg > pCtx->csHid.u32Limit)
            return iemRaiseGeneralProtectionFault0(pIemCpu);

        if (pIemCpu->enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
            pCtx->rip = offSeg;
        else
            pCtx->rip = offSeg & UINT16_MAX;
        pCtx->cs            = uSel;
        pCtx->csHid.u64Base = (uint32_t)uSel << 4;
        /** @todo REM resets the accessed bit (seen on jmp far16 after disabling
         *        PE).  Check with VT-x and AMD-V. */
#ifdef IEM_VERIFICATION_MODE
        pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
#endif
        return VINF_SUCCESS;
    }

    /*
     * Protected mode. Need to parse the specified descriptor...
     */
    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
    {
        Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    }

    /* Fetch the descriptor. */
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Is it there? */
    if (!Desc.Legacy.Gen.u1Present)
    {
        Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
    }

    /*
     * Deal with it according to its type.
     */
    if (Desc.Legacy.Gen.u1DescType)
    {
        /* Only code segments. */
        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }

        /* L vs D. */
        if (   Desc.Legacy.Gen.u1Long
            && Desc.Legacy.Gen.u1DefBig
            && IEM_IS_LONG_MODE(pIemCpu))
        {
            Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }

        /* DPL/RPL/CPL check, where conforming segments make a difference. */
        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
        {
            if (Desc.Legacy.Gen.u2Dpl > pIemCpu->uCpl)
            {
                Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
                     uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
        }
        else
        {
            if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
            {
                Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
            if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
            {
                Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
        }

        /* Limit check.  (Should alternatively check for non-canonical addresses
           here, but that is ruled out by offSeg being 32-bit, right?) */
        uint64_t u64Base;
        uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
        if (Desc.Legacy.Gen.u1Granularity)
            cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
            u64Base = 0;
        else
        {
            if (offSeg > cbLimit)
            {
                Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
            u64Base = X86DESC_BASE(Desc.Legacy);
        }

        /*
         * Ok, everything checked out fine.  Now set the accessed bit before
         * committing the result into CS, CSHID and RIP.
         */
        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /* commit */
        pCtx->rip = offSeg;
        pCtx->cs  = uSel & (X86_SEL_MASK | X86_SEL_LDT);
        pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
        pCtx->csHid.Attr.u   = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
        pCtx->csHid.u32Limit = cbLimit;
        pCtx->csHid.u64Base  = u64Base;
        /** @todo check if the hidden bits are loaded correctly for 64-bit
         *        mode. */
        return VINF_SUCCESS;
    }

    /*
     * System selector.
     */
    if (IEM_IS_LONG_MODE(pIemCpu))
        switch (Desc.Legacy.Gen.u4Type)
        {
            case AMD64_SEL_TYPE_SYS_LDT:
            case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
            case AMD64_SEL_TYPE_SYS_TSS_BUSY:
            case AMD64_SEL_TYPE_SYS_CALL_GATE:
            case AMD64_SEL_TYPE_SYS_INT_GATE:
            case AMD64_SEL_TYPE_SYS_TRAP_GATE:
                /* Call various functions to do the work. */
                AssertFailedReturn(VERR_NOT_IMPLEMENTED);
            default:
                Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }
    switch (Desc.Legacy.Gen.u4Type)
    {
        case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
        case X86_SEL_TYPE_SYS_LDT:
        case X86_SEL_TYPE_SYS_286_CALL_GATE:
        case X86_SEL_TYPE_SYS_TASK_GATE:
        case X86_SEL_TYPE_SYS_286_INT_GATE:
        case X86_SEL_TYPE_SYS_286_TRAP_GATE:
        case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
        case X86_SEL_TYPE_SYS_386_CALL_GATE:
        case X86_SEL_TYPE_SYS_386_INT_GATE:
        case X86_SEL_TYPE_SYS_386_TRAP_GATE:
            /* Call various functions to do the work. */
            AssertFailedReturn(VERR_NOT_IMPLEMENTED);

        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
            /* Call various functions to do the work. */
            AssertFailedReturn(VERR_NOT_IMPLEMENTED);

        default:
            Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
    }
}
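/*
 * Illustrative note, not part of this changeset: the granularity scaling above
 * turns the raw 20-bit descriptor limit into a byte limit, e.g. for a flat
 * 4 GiB code segment:
 */
#if 0 /* worked example only */
{
    uint32_t cbLimit = 0xfffff;                             /* raw X86DESC_LIMIT value */
    cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;   /* G=1: scale by 4 KiB     */
    Assert(cbLimit == UINT32_C(0xffffffff));                /* 4 GiB - 1               */
}
#endif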
/**
 * Implements far calls.
 *
 * @param   uSel        The selector.
 * @param   offSeg      The segment offset.
 * @param   enmOpSize   The operand size (in case we need it).
 */
IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;
    uint64_t        uNewRsp;
    void           *pvRet;

    /*
     * Real mode and V8086 mode are easy.  The only snag seems to be that
     * CS.limit doesn't change and the limit check is done against the current
     * limit.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
    {
        Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);

        /* Check stack first - may #SS(0). */
        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
                                               &pvRet, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /* Check the target address range. */
        if (offSeg > UINT32_MAX)
            return iemRaiseGeneralProtectionFault0(pIemCpu);

        /* Everything is fine, push the return address. */
        if (enmOpSize == IEMMODE_16BIT)
        {
            ((uint16_t *)pvRet)[0] = pCtx->ip + cbInstr;
            ((uint16_t *)pvRet)[1] = pCtx->cs;
        }
        else
        {
            ((uint32_t *)pvRet)[0] = pCtx->eip + cbInstr;
            ((uint16_t *)pvRet)[3] = pCtx->cs;
        }
        rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /* Branch. */
        pCtx->rip           = offSeg;
        pCtx->cs            = uSel;
        pCtx->csHid.u64Base = (uint32_t)uSel << 4;
        /** @todo Does REM reset the accessed bit here too? (See on jmp far16
         *        after disabling PE.)  Check with VT-x and AMD-V. */
#ifdef IEM_VERIFICATION_MODE
        pCtx->csHid.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
#endif
        return VINF_SUCCESS;
    }

    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
}
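/*
 * Illustrative note, not part of this changeset: the 32-bit retf below pops an
 * eight byte frame where the new EIP occupies the first two words and the new
 * CS the third (the fourth being the pad word a far call pushes with CS):
 */
#if 0 /* worked example only */
{
    uint16_t const au16Frame[4] = { 0x5678, 0x1234, 0x0008, 0x0000 };        /* hypothetical stack image */
    Assert(RT_MAKE_U32(au16Frame[0], au16Frame[1]) == UINT32_C(0x12345678)); /* new EIP */
    Assert(au16Frame[2] == 0x0008);                                          /* new CS  */
}
#endif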
/**
 * Implements retf.
 *
 * @param   enmEffOpSize    The effective operand size.
 * @param   cbPop           The amount of arguments to pop from the stack
 *                          (bytes).
 */
IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;
    uint64_t        uNewRsp;

    /*
     * Real mode and V8086 mode are easy.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
    {
        Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
        uint16_t const *pu16Frame;
        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
                                              (void const **)&pu16Frame, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        uint32_t uNewEip;
        uint16_t uNewCs;
        if (enmEffOpSize == IEMMODE_32BIT)
        {
            uNewCs  = pu16Frame[2];
            uNewEip = RT_MAKE_U32(pu16Frame[0], pu16Frame[1]);
        }
        else
        {
            uNewCs  = pu16Frame[1];
            uNewEip = pu16Frame[0];
        }
        /** @todo check how this is supposed to work if sp=0xfffe. */

        /* Check the limit of the new EIP. */
        /** @todo Intel pseudo code only does the limit check for 16-bit
         *        operands, AMD does not make any distinction. What is right? */
        if (uNewEip > pCtx->csHid.u32Limit)
            return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);

        /* commit the operation. */
        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        pCtx->rip           = uNewEip;
        pCtx->cs            = uNewCs;
        pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
        /** @todo do we load attribs and limit as well? */
        if (cbPop)
            iemRegAddToRsp(pCtx, cbPop);
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Implements retn.
 *
 * We're doing this in C because of the \#GP that might be raised if the popped
 * program counter is out of bounds.
 *
 * @param   enmEffOpSize    The effective operand size.
 * @param   cbPop           The amount of arguments to pop from the stack
 *                          (bytes).
 */
IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /* Fetch the RSP from the stack. */
    VBOXSTRICTRC    rcStrict;
    RTUINT64U       NewRip;
    RTUINT64U       NewRsp;
    NewRsp.u = pCtx->rsp;
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
            NewRip.u = 0;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
            break;
        case IEMMODE_32BIT:
            NewRip.u = 0;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
            break;
        case IEMMODE_64BIT:
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Check the new RIP before loading it. */
    /** @todo Should test this as the intel+amd pseudo code doesn't mention half
     *        of it.  The canonical test is performed here and for call. */
    if (enmEffOpSize != IEMMODE_64BIT)
    {
        if (NewRip.DWords.dw0 > pCtx->csHid.u32Limit)
        {
            Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
            return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        }
    }
    else
    {
        if (!IEM_IS_CANONICAL(NewRip.u))
        {
            Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
            return iemRaiseNotCanonical(pIemCpu);
        }
    }

    /* Commit it. */
    pCtx->rip = NewRip.u;
    pCtx->rsp = NewRsp.u;
    if (cbPop)
        iemRegAddToRsp(pCtx, cbPop);

    return VINF_SUCCESS;
}
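/*
 * Illustrative note, not part of this changeset: the real-mode IDT indexed by
 * the int implementation below is the classic IVT, an array of 256 four-byte
 * far pointers stored offset word first, then segment word.  That is why the
 * entry address is idtr.pIdt + 4 * u8Int and why a single iemMemFetchDataU32
 * can fill an RTFAR16 (assuming a little-endian host, as the cast relies on):
 */
#if 0 /* worked example only */
{
    RTFAR16 Idte;
    *(uint32_t *)&Idte = UINT32_C(0xf000ff53);      /* hypothetical vector f000:ff53 */
    Assert(Idte.off == 0xff53 && Idte.sel == 0xf000);
}
#endif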
/**
 * Implements int3 and int XX.
 *
 * @param   u8Int       The interrupt vector number.
 * @param   fIsBpInstr  Is it the breakpoint instruction.
 */
IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
{
    /** @todo we should call TRPM to do this job. */
    VBOXSTRICTRC    rcStrict;
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Real mode is easy.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_MODE(pIemCpu))
    {
        /* read the IDT entry. */
        if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Int + 3)
            return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Int << X86_TRAP_ERR_SEL_SHIFT));
        RTFAR16 Idte;
        rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Int);
        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
            return rcStrict;

        /* push the stack frame. */
        uint16_t *pu16Frame;
        uint64_t  uNewRsp;
        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        pu16Frame[2] = (uint16_t)pCtx->eflags.u;
        pu16Frame[1] = (uint16_t)pCtx->cs;
        pu16Frame[0] = pCtx->ip + cbInstr;
        rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
            return rcStrict;

        /* load the vector address into cs:ip. */
        pCtx->cs            = Idte.sel;
        pCtx->csHid.u64Base = (uint32_t)Idte.sel << 4;
        /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
        pCtx->rip = Idte.off;
        pCtx->eflags.Bits.u1IF = 0;
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
}
/**
 * Implements iret.
 *
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;
    uint64_t        uNewRsp;

    /*
     * Real mode is easy, V8086 mode is relatively similar.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
    {
        /* iret throws an exception if VME isn't enabled. */
        if (   pCtx->eflags.Bits.u1VM
            && !(pCtx->cr4 & X86_CR4_VME))
            return iemRaiseGeneralProtectionFault0(pIemCpu);

        /* Do the stack bits, but don't commit RSP before everything checks
           out right. */
        union
        {
            uint32_t const *pu32;
            uint16_t const *pu16;
            void const     *pv;
        } uFrame;
        Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
        uint16_t uNewCs;
        uint32_t uNewEip;
        uint32_t uNewFlags;
        if (enmEffOpSize == IEMMODE_32BIT)
        {
            rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            uNewEip    = uFrame.pu32[0];
            uNewCs     = (uint16_t)uFrame.pu32[1];
            uNewFlags  = uFrame.pu32[2];
            uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                       | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
                       | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
                       | X86_EFL_ID;
            uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
        }
        else
        {
            rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            uNewEip    = uFrame.pu16[0];
            uNewCs     = uFrame.pu16[1];
            uNewFlags  = uFrame.pu16[2];
            uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                       | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
            uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
            /** @todo The intel pseudo code does not indicate what happens to
             *        reserved flags. We just ignore them. */
        }
        /** @todo Check how this is supposed to work if sp=0xfffe. */

        /* Check the limit of the new EIP. */
        /** @todo Only the AMD pseudo code checks the limit here, what's
         *        right? */
        if (uNewEip > pCtx->csHid.u32Limit)
            return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);

        /* V8086 checks and flag adjustments */
        if (pCtx->eflags.Bits.u1VM)
        {
            if (pCtx->eflags.Bits.u2IOPL == 3)
            {
                /* Preserve IOPL and clear RF. */
                uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
                uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
            }
            else if (   enmEffOpSize == IEMMODE_16BIT
                     && (   !(uNewFlags & X86_EFL_IF)
                         || !pCtx->eflags.Bits.u1VIP )
                     && !(uNewFlags & X86_EFL_TF)   )
            {
                /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
                uNewFlags &= ~X86_EFL_VIF;
                uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
                uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
                uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
            }
            else
                return iemRaiseGeneralProtectionFault0(pIemCpu);
        }

        /* commit the operation. */
        rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        pCtx->rip           = uNewEip;
        pCtx->cs            = uNewCs;
        pCtx->csHid.u64Base = (uint32_t)uNewCs << 4;
        /** @todo do we load attribs and limit as well? */
        Assert(uNewFlags & X86_EFL_1);
        pCtx->eflags.u = uNewFlags;

        return VINF_SUCCESS;
    }


    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
}
/**
 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
 *
 * @param   iSegReg     The segment register number (valid).
 * @param   uSel        The new selector value.
 */
IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint16_t       *pSel = iemSRegRef(pIemCpu, iSegReg);
    PCPUMSELREGHID  pHid = iemSRegGetHid(pIemCpu, iSegReg);

    Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);

    /*
     * Real mode and V8086 mode are easy.
     */
    if (   pIemCpu->enmCpuMode == IEMMODE_16BIT
        && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
    {
        *pSel = uSel;
        pHid->u64Base = (uint32_t)uSel << 4;
        /** @todo Does the CPU actually load limits and attributes in the
         *        real/V8086 mode segment load case?  It doesn't for CS in far
         *        jumps...  Affects unreal mode. */
        pHid->u32Limit          = 0xffff;
        pHid->Attr.u            = 0;
        pHid->Attr.n.u1Present  = 1;
        pHid->Attr.n.u1DescType = 1;
        pHid->Attr.n.u4Type     = iSegReg != X86_SREG_CS
                                ? X86_SEL_TYPE_RW
                                : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;

        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /*
     * Protected mode.
     *
     * Check if it's a null segment selector value first, that's OK for DS, ES,
     * FS and GS.  If not null, then we have to load and parse the descriptor.
     */
    if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
    {
        if (iSegReg == X86_SREG_SS)
        {
            if (   pIemCpu->enmCpuMode != IEMMODE_64BIT
                || pIemCpu->uCpl != 0
                || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
            {
                Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            /* In 64-bit kernel mode, the stack can be 0 because of the way
               interrupts are dispatched when in kernel ctx.  Just load the
               selector value into the register and leave the hidden bits
               as is. */
            *pSel = uSel;
            iemRegAddToRip(pIemCpu, cbInstr);
            return VINF_SUCCESS;
        }

        *pSel = uSel;   /* Not RPL, remember :-) */
        if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
            && iSegReg != X86_SREG_FS
            && iSegReg != X86_SREG_GS)
        {
            /** @todo figure out what this actually does, it works. Needs
             *        testcase! */
            pHid->Attr.u           = 0;
            pHid->Attr.n.u1Present = 1;
            pHid->Attr.n.u1Long    = 1;
            pHid->Attr.n.u4Type    = X86_SEL_TYPE_RW;
            pHid->Attr.n.u2Dpl     = 3;
            pHid->u32Limit         = 0;
            pHid->u64Base          = 0;
        }
        else
        {
            pHid->Attr.u   = 0;
            pHid->u32Limit = 0;
            pHid->u64Base  = 0;
        }
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* Fetch the descriptor. */
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Check GPs first. */
    if (!Desc.Legacy.Gen.u1DescType)
    {
        Log(("load sreg %d, %#x - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
        return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
    }
    if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
    {
        if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
            || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
        {
            Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }
        if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
        {
            Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }
        if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
        {
            Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }
    }
    else
    {
        if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
        {
            Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
            return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
        }
        if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
            != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
        {
#if 0 /* this is what intel says. */
            if (   (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
                && pIemCpu->uCpl        > Desc.Legacy.Gen.u2Dpl)
            {
                Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
                     iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
#else /* this is what makes more sense. */
            if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
            {
                Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
                     iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
            if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
            {
                Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
                     iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
                return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));
            }
#endif
        }
    }

    /* Is it there? */
    if (!Desc.Legacy.Gen.u1Present)
    {
        Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
        return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
    }

    /* The base and limit. */
    uint64_t u64Base;
    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
    if (Desc.Legacy.Gen.u1Granularity)
        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;

    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && iSegReg < X86_SREG_FS)
        u64Base = 0;
    else
        u64Base = X86DESC_BASE(Desc.Legacy);

    /*
     * Ok, everything checked out fine.  Now set the accessed bit before
     * committing the result into the registers.
     */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }

    /* commit */
    *pSel = uSel;
    pHid->Attr.u   = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
    pHid->u32Limit = cbLimit;
    pHid->u64Base  = u64Base;

    /** @todo check if the hidden bits are loaded correctly for 64-bit
     *        mode. */

    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
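/*
 * Illustrative note, not part of this changeset: a worked example of the data
 * segment privilege variant chosen above ("what makes more sense").  Loading
 * DS with an RPL=3 selector for a DPL=2 data segment faults on the RPL test
 * alone, even at CPL 0, while the #if 0 Intel pseudo code variant only faults
 * when both RPL and CPL exceed DPL:
 */
#if 0 /* worked example only */
{
    uint16_t const uSel = (2 << 3) | 3;     /* hypothetical GDT entry 2, RPL=3 */
    uint8_t  const uDpl = 2, uCpl = 0;
    Assert((unsigned)(uSel & X86_SEL_RPL) > uDpl);           /* #else variant: #GP    */
    Assert(!((uSel & X86_SEL_RPL) > uDpl && uCpl > uDpl));   /* Intel variant: no #GP */
}
#endif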
/**
 * Implements 'mov SReg, r/m'.
 *
 * @param   iSegReg     The segment register number (valid).
 * @param   uSel        The new selector value.
 */
IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
{
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
    if (rcStrict == VINF_SUCCESS)
    {
        if (iSegReg == X86_SREG_SS)
        {
            PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
            EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
        }
    }
    return rcStrict;
}


/**
 * Implements 'pop SReg'.
 *
 * @param   iSegReg         The segment register number (valid).
 * @param   enmEffOpSize    The effective operand size (valid).
 */
IEM_CIMPL_DEF_2(iemOpCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;

    /*
     * Read the selector off the stack and join paths with mov ss, reg.
     */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t uSel;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
            break;
        }
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    /*
     * Commit the stack on success.
     */
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        if (iSegReg == X86_SREG_SS)
            EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
    }
    return rcStrict;
}


/**
 * Implements lgs, lfs, les, lds & lss.
 */
IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
                uint16_t, uSel,
                uint64_t, offSeg,
                uint8_t,  iSegReg,
                uint8_t,  iGReg,
                IEMMODE,  enmEffOpSize)
{
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;

    /*
     * Use iemCImpl_LoadSReg to do the tricky segment register loading.
     */
    /** @todo verify and test that mov, pop and lXs works the segment
     *        register loading in the exact same way. */
    rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
    if (rcStrict == VINF_SUCCESS)
    {
        switch (enmEffOpSize)
        {
            case IEMMODE_16BIT:
                *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
                break;
            case IEMMODE_32BIT:
                *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
                break;
            case IEMMODE_64BIT:
                *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
                break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    return rcStrict;
}
/**
 * Implements lgdt.
 *
 * @param   iEffSeg         The segment of the new gdtr contents.
 * @param   GCPtrEffSrc     The address of the new gdtr contents.
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
{
    if (pIemCpu->uCpl != 0)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);

    /*
     * Fetch the limit and base address.
     */
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        if (IEM_VERIFICATION_ENABLED(pIemCpu))
            rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
        else
        {
            PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
            pCtx->gdtr.cbGdt = cbLimit;
            pCtx->gdtr.pGdt  = GCPtrBase;
        }
        if (rcStrict == VINF_SUCCESS)
            iemRegAddToRip(pIemCpu, cbInstr);
    }
    return rcStrict;
}


/**
 * Implements lidt.
 *
 * @param   iEffSeg         The segment of the new idtr contents.
 * @param   GCPtrEffSrc     The address of the new idtr contents.
 * @param   enmEffOpSize    The effective operand size.
 */
IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
{
    if (pIemCpu->uCpl != 0)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);

    /*
     * Fetch the limit and base address.
     */
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        if (IEM_VERIFICATION_ENABLED(pIemCpu))
            rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
        else
        {
            PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
            pCtx->idtr.cbIdt = cbLimit;
            pCtx->idtr.pIdt  = GCPtrBase;
        }
        if (rcStrict == VINF_SUCCESS)
            iemRegAddToRip(pIemCpu, cbInstr);
    }
    return rcStrict;
}
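/*
 * Illustrative note, not part of this changeset: the descriptor-table image
 * that iemMemFetchDataXdtr reads for lgdt/lidt above is the architectural
 * operand layout, a 16-bit limit followed by the base (with a 16-bit operand
 * size only the low 24 bits of the base are presumably kept, and long mode
 * uses a 64-bit base instead):
 */
#pragma pack(1)
typedef struct XDTR32SKETCH /* hypothetical type, for illustration only */
{
    uint16_t cbLimit;   /* Offset 0: table limit in bytes.                      */
    uint32_t uBase;     /* Offset 2: linear base address (24 bits if opsize16). */
} XDTR32SKETCH;
#pragma pack()
AssertCompile(sizeof(XDTR32SKETCH) == 6);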
/**
 * Implements mov GReg,CRx.
 *
 * @param   iGReg   The general register to store the CRx value in.
 * @param   iCrReg  The CRx register to read (valid).
 */
IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    if (pIemCpu->uCpl != 0)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    Assert(!pCtx->eflags.Bits.u1VM);

    /* read it */
    uint64_t crX;
    switch (iCrReg)
    {
        case 0: crX = pCtx->cr0; break;
        case 2: crX = pCtx->cr2; break;
        case 3: crX = pCtx->cr3; break;
        case 4: crX = pCtx->cr4; break;
        case 8:
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
                AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
            else
                crX = 0xff;
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
    }

    /* store it */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
    else
        *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;

    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements mov CRx,GReg.
 *
 * @param   iCrReg  The CRx register to write (valid).
 * @param   iGReg   The general register to load the CRx value from.
 */
IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
{
    PCPUMCTX        pCtx  = pIemCpu->CTX_SUFF(pCtx);
    PVMCPU          pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    VBOXSTRICTRC    rcStrict;
    int             rc;

    if (pIemCpu->uCpl != 0)
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    Assert(!pCtx->eflags.Bits.u1VM);

    /*
     * Read the new value from the source register.
     */
    uint64_t NewCrX;
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        NewCrX = iemGRegFetchU64(pIemCpu, iGReg);
    else
        NewCrX = iemGRegFetchU32(pIemCpu, iGReg);

    /*
     * Try store it.
     * Unfortunately, CPUM only does a tiny bit of the work.
     */
    switch (iCrReg)
    {
        case 0:
        {
            /*
             * Perform checks.
             */
            uint64_t const OldCrX = pCtx->cr0;
            NewCrX |= X86_CR0_ET; /* hardcoded */

            /* Check for reserved bits. */
            uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
                                  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
                                  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
            if (NewCrX & ~(uint64_t)fValid)
            {
                Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            /* Check for invalid combinations. */
            if (    (NewCrX & X86_CR0_PG)
                && !(NewCrX & X86_CR0_PE) )
            {
                Log(("Trying to set CR0.PG without CR0.PE\n"));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            if (   !(NewCrX & X86_CR0_CD)
                &&  (NewCrX & X86_CR0_NW) )
            {
                Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            /* Long mode consistency checks. */
            if (    (NewCrX & X86_CR0_PG)
                && !(OldCrX & X86_CR0_PG)
                &&  (pCtx->msrEFER & MSR_K6_EFER_LME) )
            {
                if (!(pCtx->cr4 & X86_CR4_PAE))
                {
                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
                    return iemRaiseGeneralProtectionFault0(pIemCpu);
                }
                if (pCtx->csHid.Attr.n.u1Long)
                {
                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
                    return iemRaiseGeneralProtectionFault0(pIemCpu);
                }
            }

            /** @todo check reserved PDPTR bits as AMD states. */

            /*
             * Change CR0.
             */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                rc = CPUMSetGuestCR0(pVCpu, NewCrX);
                AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
            }
            else
                pCtx->cr0 = NewCrX;
            Assert(pCtx->cr0 == NewCrX);

            /*
             * Change EFER.LMA if entering or leaving long mode.
             */
            if (   (NewCrX & X86_CR0_PG) != (OldCrX & X86_CR0_PG)
                && (pCtx->msrEFER & MSR_K6_EFER_LME) )
            {
                uint64_t NewEFER = pCtx->msrEFER;
                if (NewCrX & X86_CR0_PG)
                    NewEFER |= MSR_K6_EFER_LMA;
                else
                    NewEFER &= ~MSR_K6_EFER_LMA;

                if (IEM_VERIFICATION_ENABLED(pIemCpu))
                    CPUMSetGuestEFER(pVCpu, NewEFER);
                else
                    pCtx->msrEFER = NewEFER;
                Assert(pCtx->msrEFER == NewEFER);
            }

            /*
             * Inform PGM.
             */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                if (    (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
                    !=  (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
                {
                    rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
                    AssertRCReturn(rc, rc);
                    /* ignore informational status codes */
                }
                rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
                /** @todo Status code management. */
            }
            else
                rcStrict = VINF_SUCCESS;
            break;
        }
        /*
         * CR2 can be changed without any restrictions.
         */
        case 2:
            pCtx->cr2 = NewCrX;
            rcStrict  = VINF_SUCCESS;
            break;

        /*
         * CR3 is relatively simple, although AMD and Intel have different
         * accounts of how setting reserved bits are handled.  We take intel's
         * word for the lower bits and AMD's for the high bits (63:52).
         */
        /** @todo Testcase: Setting reserved bits in CR3, especially before
         *        enabling paging. */
        case 3:
        {
            /* check / mask the value. */
            if (NewCrX & UINT64_C(0xfff0000000000000))
            {
                Log(("Trying to load CR3 with invalid high bits set: %#llx\n", NewCrX));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            uint64_t fValid;
            if (   (pCtx->cr4 & X86_CR4_PAE)
                && (pCtx->msrEFER & MSR_K6_EFER_LME))
                fValid = UINT64_C(0x000ffffffffff014);
            else if (pCtx->cr4 & X86_CR4_PAE)
                fValid = UINT64_C(0xfffffff4);
            else
                fValid = UINT64_C(0xfffff014);
            if (NewCrX & ~fValid)
            {
                Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
                     NewCrX, NewCrX & ~fValid));
                NewCrX &= fValid;
            }

            /** @todo If we're in PAE mode we should check the PDPTRs for
             *        invalid bits. */

            /* Make the change. */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                rc = CPUMSetGuestCR3(pVCpu, NewCrX);
                AssertRCSuccessReturn(rc, rc);
            }
            else
                pCtx->cr3 = NewCrX;

            /* Inform PGM. */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                if (pCtx->cr0 & X86_CR0_PG)
                {
                    rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
                    AssertRCReturn(rc, rc);
                    /* ignore informational status codes */
                    /** @todo status code management */
                }
            }
            rcStrict = VINF_SUCCESS;
            break;
        }
        /*
         * CR4 is a bit more tedious as there are bits which cannot be cleared
         * under some circumstances and such.
         */
        case 4:
        {
            uint64_t const OldCrX = pCtx->cr4;

            /* reserved bits */
            uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
                            | X86_CR4_TSD | X86_CR4_DE
                            | X86_CR4_PSE | X86_CR4_PAE
                            | X86_CR4_MCE | X86_CR4_PGE
                            | X86_CR4_PCE | X86_CR4_OSFSXR
                            | X86_CR4_OSXMMEEXCPT;
            //if (xxx)
            //    fValid |= X86_CR4_VMXE;
            //if (xxx)
            //    fValid |= X86_CR4_OSXSAVE;
            if (NewCrX & ~(uint64_t)fValid)
            {
                Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", NewCrX, NewCrX & ~(uint64_t)fValid));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }

            /* long mode checks. */
            if (    (OldCrX & X86_CR4_PAE)
                && !(NewCrX & X86_CR4_PAE)
                &&  (pCtx->msrEFER & MSR_K6_EFER_LMA) )
            {
                Log(("Trying to clear CR4.PAE while long mode is active\n"));
                return iemRaiseGeneralProtectionFault0(pIemCpu);
            }


            /*
             * Change it.
             */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                rc = CPUMSetGuestCR4(pVCpu, NewCrX);
                AssertRCSuccessReturn(rc, rc);
            }
            else
                pCtx->cr4 = NewCrX;
            Assert(pCtx->cr4 == NewCrX);

            /*
             * Notify SELM and PGM.
             */
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
            {
                /* SELM - VME may change things wrt to the TSS shadowing. */
                if ((NewCrX ^ OldCrX) & X86_CR4_VME)
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);

                /* PGM - flushing and mode. */
                if (    (NewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
                    !=  (OldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
                {
                    rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
                    AssertRCReturn(rc, rc);
                    /* ignore informational status codes */
                }
                rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
                /** @todo Status code management. */
            }
            else
                rcStrict = VINF_SUCCESS;
            break;
        }

        /*
         * CR8 maps to the APIC TPR.
         */
        case 8:
            if (IEM_VERIFICATION_ENABLED(pIemCpu))
                AssertFailedReturn(VERR_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
            else
                rcStrict = VINF_SUCCESS;
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
    }

    /*
     * Advance the RIP on success.
     */
    /** @todo Status code management. */
    if (rcStrict == VINF_SUCCESS)
        iemRegAddToRip(pIemCpu, cbInstr);
    return rcStrict;
}
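/*
 * Illustrative note, not part of this changeset: the CR0/CR4/EFER checks in
 * the function above add up to the usual long mode activation sequence a
 * guest performs, where writing CR0.PG while EFER.LME is set is the step
 * that flips EFER.LMA:
 */
#if 0 /* worked example only */
{
    uint64_t cr4 = 0, efer = 0, cr0 = X86_CR0_PE | X86_CR0_ET;
    cr4  |= X86_CR4_PAE;                /* 1: PAE first, checked by the CR0 path. */
    efer |= MSR_K6_EFER_LME;            /* 2: wrmsr EFER.LME (not handled above). */
    cr0  |= X86_CR0_PG;                 /* 3: paging on...                        */
    if ((cr0 & X86_CR0_PG) && (efer & MSR_K6_EFER_LME))
        efer |= MSR_K6_EFER_LMA;        /* ...mirrors the EFER.LMA update above.  */
    Assert(efer & MSR_K6_EFER_LMA);
}
#endif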
*/5408 if (rcStrict == VINF_SUCCESS)5409 iemRegAddToRip(pIemCpu, cbInstr);5410 return rcStrict;5411 }5412 5413 5414 /**5415 * Implements 'IN eAX, port'.5416 *5417 * @param u16Port The source port.5418 * @param cbReg The register size.5419 */5420 IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)5421 {5422 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);5423 5424 /*5425 * CPL check5426 */5427 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);5428 if (rcStrict != VINF_SUCCESS)5429 return rcStrict;5430 5431 /*5432 * Perform the I/O.5433 */5434 uint32_t u32Value;5435 if (IEM_VERIFICATION_ENABLED(pIemCpu))5436 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);5437 else5438 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);5439 if (IOM_SUCCESS(rcStrict))5440 {5441 switch (cbReg)5442 {5443 case 1: pCtx->al = (uint8_t)u32Value; break;5444 case 2: pCtx->ax = (uint16_t)u32Value; break;5445 case 4: pCtx->rax = u32Value; break;5446 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);5447 }5448 iemRegAddToRip(pIemCpu, cbInstr);5449 pIemCpu->cPotentialExits++;5450 }5451 /** @todo massage rcStrict. */5452 return rcStrict;5453 }5454 5455 5456 /**5457 * Implements 'IN eAX, DX'.5458 *5459 * @param cbReg The register size.5460 */5461 IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)5462 {5463 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);5464 }5465 5466 5467 /**5468 * Implements 'OUT port, eAX'.5469 *5470 * @param u16Port The destination port.5471 * @param cbReg The register size.5472 */5473 IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)5474 {5475 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);5476 5477 /*5478 * CPL check5479 */5480 if ( (pCtx->cr0 & X86_CR0_PE)5481 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL5482 || pCtx->eflags.Bits.u1VM) )5483 {5484 /** @todo I/O port permission bitmap check */5485 AssertFailedReturn(VERR_NOT_IMPLEMENTED);5486 }5487 5488 /*5489 * Perform the I/O.5490 */5491 uint32_t u32Value;5492 switch (cbReg)5493 {5494 case 1: u32Value = pCtx->al; break;5495 case 2: u32Value = pCtx->ax; break;5496 case 4: u32Value = pCtx->eax; break;5497 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);5498 }5499 VBOXSTRICTRC rc;5500 if (IEM_VERIFICATION_ENABLED(pIemCpu))5501 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);5502 else5503 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);5504 if (IOM_SUCCESS(rc))5505 {5506 iemRegAddToRip(pIemCpu, cbInstr);5507 pIemCpu->cPotentialExits++;5508 /** @todo massage rc. 
*/5509 }5510 return rc;5511 }5512 5513 5514 /**5515 * Implements 'OUT DX, eAX'.5516 *5517 * @param cbReg The register size.5518 */5519 IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)5520 {5521 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);5522 }5523 5524 5525 /**5526 * Implements 'CLI'.5527 */5528 IEM_CIMPL_DEF_0(iemCImpl_cli)5529 {5530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);5531 5532 if (pCtx->cr0 & X86_CR0_PE)5533 {5534 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;5535 if (!pCtx->eflags.Bits.u1VM)5536 {5537 if (pIemCpu->uCpl <= uIopl)5538 pCtx->eflags.Bits.u1IF = 0;5539 else if ( pIemCpu->uCpl == 35540 && (pCtx->cr4 & X86_CR4_PVI) )5541 pCtx->eflags.Bits.u1VIF = 0;5542 else5543 return iemRaiseGeneralProtectionFault0(pIemCpu);5544 }5545 /* V8086 */5546 else if (uIopl == 3)5547 pCtx->eflags.Bits.u1IF = 0;5548 else if ( uIopl < 35549 && (pCtx->cr4 & X86_CR4_VME) )5550 pCtx->eflags.Bits.u1VIF = 0;5551 else5552 return iemRaiseGeneralProtectionFault0(pIemCpu);5553 }5554 /* real mode */5555 else5556 pCtx->eflags.Bits.u1IF = 0;5557 iemRegAddToRip(pIemCpu, cbInstr);5558 return VINF_SUCCESS;5559 }5560 5561 5562 /**5563 * Implements 'STI'.5564 */5565 IEM_CIMPL_DEF_0(iemCImpl_sti)5566 {5567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);5568 5569 if (pCtx->cr0 & X86_CR0_PE)5570 {5571 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;5572 if (!pCtx->eflags.Bits.u1VM)5573 {5574 if (pIemCpu->uCpl <= uIopl)5575 pCtx->eflags.Bits.u1IF = 1;5576 else if ( pIemCpu->uCpl == 35577 && (pCtx->cr4 & X86_CR4_PVI)5578 && !pCtx->eflags.Bits.u1VIP )5579 pCtx->eflags.Bits.u1VIF = 1;5580 else5581 return iemRaiseGeneralProtectionFault0(pIemCpu);5582 }5583 /* V8086 */5584 else if (uIopl == 3)5585 pCtx->eflags.Bits.u1IF = 1;5586 else if ( uIopl < 35587 && (pCtx->cr4 & X86_CR4_VME)5588 && !pCtx->eflags.Bits.u1VIP )5589 pCtx->eflags.Bits.u1VIF = 1;5590 else5591 return iemRaiseGeneralProtectionFault0(pIemCpu);5592 }5593 /* real mode */5594 else5595 pCtx->eflags.Bits.u1IF = 1;5596 5597 iemRegAddToRip(pIemCpu, cbInstr);5598 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);5599 return VINF_SUCCESS;5600 }5601 5602 5603 /**5604 * Implements 'HLT'.5605 */5606 IEM_CIMPL_DEF_0(iemCImpl_hlt)5607 {5608 if (pIemCpu->uCpl != 0)5609 return iemRaiseGeneralProtectionFault0(pIemCpu);5610 iemRegAddToRip(pIemCpu, cbInstr);5611 return VINF_EM_HALT;5612 }5613 5614 5615 3470 /* 5616 * Instantiate the various string operation combinations. 
*/ 5618 #define OP_SIZE 8 5619 #define ADDR_SIZE 16 5620 #include "IEMAllCImplStrInstr.cpp.h" 5621 #define OP_SIZE 8 5622 #define ADDR_SIZE 32 5623 #include "IEMAllCImplStrInstr.cpp.h" 5624 #define OP_SIZE 8 5625 #define ADDR_SIZE 64 5626 #include "IEMAllCImplStrInstr.cpp.h" 5627 5628 #define OP_SIZE 16 5629 #define ADDR_SIZE 16 5630 #include "IEMAllCImplStrInstr.cpp.h" 5631 #define OP_SIZE 16 5632 #define ADDR_SIZE 32 5633 #include "IEMAllCImplStrInstr.cpp.h" 5634 #define OP_SIZE 16 5635 #define ADDR_SIZE 64 5636 #include "IEMAllCImplStrInstr.cpp.h" 5637 5638 #define OP_SIZE 32 5639 #define ADDR_SIZE 16 5640 #include "IEMAllCImplStrInstr.cpp.h" 5641 #define OP_SIZE 32 5642 #define ADDR_SIZE 32 5643 #include "IEMAllCImplStrInstr.cpp.h" 5644 #define OP_SIZE 32 5645 #define ADDR_SIZE 64 5646 #include "IEMAllCImplStrInstr.cpp.h" 5647 5648 #define OP_SIZE 64 5649 #define ADDR_SIZE 32 5650 #include "IEMAllCImplStrInstr.cpp.h" 5651 #define OP_SIZE 64 5652 #define ADDR_SIZE 64 5653 #include "IEMAllCImplStrInstr.cpp.h" 5654 5655 5656 /** @} */ 3471 * Include the C/C++ implementations of instructions. 3472 */ 3473 #include "IEMAllCImpl.cpp.h" 3474 5657 3475 5658 3476 -
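For readers new to this instantiation trick: IEMAllCImplStrInstr.cpp.h is written once against the OP_SIZE and ADDR_SIZE macros and is compiled repeatedly, each #include above stamping out the string workers for one operand-size/address-size combination (the header consumes, i.e. #undefs, the macros when done). Below is a minimal standalone sketch of the same technique; all names are invented for illustration (my_stos_uN is not an IEM symbol), and the parameterised body is folded into a macro so the sketch fits in one file instead of a re-included header:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* In IEM the parameterised body lives in its own header and is re-included
   with a fresh OP_SIZE each time; here a macro plays the header's role. */
#define MY_CAT_(a,b) a##b
#define MY_CAT(a,b)  MY_CAT_(a,b)

#define MY_INSTANTIATE_STOS(bits) \
    static void MY_CAT(my_stos_u, bits)(MY_CAT(MY_CAT(uint, bits), _t) *pDst, \
                                        MY_CAT(MY_CAT(uint, bits), _t) uValue, \
                                        size_t cItems) \
    { \
        while (cItems-- > 0) \
            *pDst++ = uValue; /* the REP STOS core, minus segmentation and faults */ \
    }

MY_INSTANTIATE_STOS(8)   /* generates my_stos_u8  */
MY_INSTANTIATE_STOS(16)  /* generates my_stos_u16 */
MY_INSTANTIATE_STOS(32)  /* generates my_stos_u32 */

int main(void)
{
    uint16_t au16Buf[4];
    my_stos_u16(au16Buf, 0xabcd, 4);
    printf("%#x\n", au16Buf[3]); /* prints 0xabcd */
    return 0;
}

The payoff is the same as in the changeset: one body, many widths, with no function-pointer indirection in the generated workers.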
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r36833 r36834 1 1 /* $Id$ */ 2 2 /** @file 3 * IEM - Interpreted Execution Manager - All Contexts.3 * IEM - Instruction Implementation in C/C++ (code include). 4 4 5 5 … … 17 17 18 18 19 /** @page pg_iem IEM - Interpreted Execution Manager20 *21 * The interpreted execution manager (IEM) is for executing short guest code22 * sequences that are causing too many exits / virtualization traps. It will23 * also be used to interpret single instructions, thus replacing the selective24 * interpreters in EM and IOM.25 *26 * Design goals:27 * - Relatively small footprint, although we favour speed and correctness28 * over size.29 * - Reasonably fast.30 * - Correctly handle lock-prefixed instructions.31 * - Complete instruction set - eventually.32 * - Refactorable into a recompiler, maybe.33 * - Replace EMInterpret*.34 *35 * Using the existing disassembler has been considered; however, this is thought36 * to conflict with speed as the disassembler chews things a bit too much while37 * leaving us with a somewhat complicated state to interpret afterwards.38 *39 *40 * The current code is very much work in progress. You've been warned!41 *42 */43 44 /*******************************************************************************45 * Header Files *46 *******************************************************************************/47 #define LOG_GROUP LOG_GROUP_EM /** @todo add log group */48 #include <VBox/vmm/iem.h>49 #include <VBox/vmm/pgm.h>50 #include <VBox/vmm/iom.h>51 #include <VBox/vmm/em.h>52 #include <VBox/vmm/dbgf.h>53 #ifdef IEM_VERIFICATION_MODE54 # include <VBox/vmm/rem.h>55 # include <VBox/vmm/mm.h>56 #endif57 #include "IEMInternal.h"58 #include <VBox/vmm/vm.h>59 #include <VBox/log.h>60 #include <VBox/err.h>61 #include <VBox/param.h>62 #include <VBox/x86.h>63 #include <iprt/assert.h>64 #include <iprt/string.h>65 66 67 /*******************************************************************************68 * Structures and Typedefs *69 *******************************************************************************/70 /** @typedef PFNIEMOP71 * Pointer to an opcode decoder function.72 */73 74 /** @def FNIEMOP_DEF75 * Define an opcode decoder function.76 *77 * We're using macros for this so that adding and removing parameters as well as78 * tweaking compiler-specific attributes becomes easier. 
See FNIEMOP_CALL79 *80 * @param a_Name The function name.81 */82 83 84 #if defined(__GNUC__) && defined(RT_ARCH_X86)85 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);86 # define FNIEMOP_DEF(a_Name) \87 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)88 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \89 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW90 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \91 static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW92 93 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)94 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);95 # define FNIEMOP_DEF(a_Name) \96 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW97 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \98 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW99 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \100 static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW101 102 #else103 typedef VBOXSTRICTRC (* PFNIEMOP)(PIEMCPU pIemCpu);104 # define FNIEMOP_DEF(a_Name) \105 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW106 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \107 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW108 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \109 static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW110 111 #endif112 113 114 /**115 * Function table for a binary operator providing implementation based on116 * operand size.117 */118 typedef struct IEMOPBINSIZES119 {120 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;121 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;122 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;123 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;124 } IEMOPBINSIZES;125 /** Pointer to a binary operator function table. */126 typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;127 128 129 /**130 * Function table for a unary operator providing implementation based on131 * operand size.132 */133 typedef struct IEMOPUNARYSIZES134 {135 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;136 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;137 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;138 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;139 } IEMOPUNARYSIZES;140 /** Pointer to a unary operator function table. */141 typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;142 143 144 /**145 * Function table for a shift operator providing implementation based on146 * operand size.147 */148 typedef struct IEMOPSHIFTSIZES149 {150 PFNIEMAIMPLSHIFTU8 pfnNormalU8;151 PFNIEMAIMPLSHIFTU16 pfnNormalU16;152 PFNIEMAIMPLSHIFTU32 pfnNormalU32;153 PFNIEMAIMPLSHIFTU64 pfnNormalU64;154 } IEMOPSHIFTSIZES;155 /** Pointer to a shift operator function table. */156 typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;157 158 159 /**160 * Function table for a multiplication or division operation.161 */162 typedef struct IEMOPMULDIVSIZES163 {164 PFNIEMAIMPLMULDIVU8 pfnU8;165 PFNIEMAIMPLMULDIVU16 pfnU16;166 PFNIEMAIMPLMULDIVU32 pfnU32;167 PFNIEMAIMPLMULDIVU64 pfnU64;168 } IEMOPMULDIVSIZES;169 /** Pointer to a multiplication or division operation function table. 
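 *
 * Illustration only (a hedged sketch, not code from this file; the
 * PFNIEMAIMPLMULDIV* worker signatures live in IEMInternal.h and are not
 * shown in this excerpt): a decoder picks the width-specific worker off a
 * table like this according to the current effective operand size, e.g.
 * @code
 *      PCIEMOPMULDIVSIZES const pImpl  = &g_iemAImpl_div; // or g_iemAImpl_idiv, g_iemAImpl_mul, ...
 *      PFNIEMAIMPLMULDIVU16     pfnU16 = pImpl->pfnU16;   // used when enmEffOpSize == IEMMODE_16BIT
 *      PFNIEMAIMPLMULDIVU32     pfnU32 = pImpl->pfnU32;   // ... IEMMODE_32BIT
 *      PFNIEMAIMPLMULDIVU64     pfnU64 = pImpl->pfnU64;   // ... IEMMODE_64BIT
 * @endcode
 * so one decoder body serves all four operand sizes.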
*/170 typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;171 172 173 /**174 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.175 */176 typedef union IEMSELDESC177 {178 /** The legacy view. */179 X86DESC Legacy;180 /** The long mode view. */181 X86DESC64 Long;182 } IEMSELDESC;183 /** Pointer to a selector descriptor table entry. */184 typedef IEMSELDESC *PIEMSELDESC;185 186 187 /*******************************************************************************188 * Defined Constants And Macros *189 *******************************************************************************/190 /** Temporary hack to disable the double execution. Will be removed in favor191 * of a dedicated execution mode in EM. */192 //#define IEM_VERIFICATION_MODE_NO_REM193 194 /** Used to shut up GCC warnings about variables that 'may be used uninitialized'195 * due to GCC lacking knowledge about the value range of a switch. */196 #define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_INTERNAL_ERROR_4)197 198 /**199 * Call an opcode decoder function.200 *201 * We're using macros for this so that adding and removing parameters can be202 * done as we please. See FNIEMOP_DEF.203 */204 #define FNIEMOP_CALL(a_pfn) (a_pfn)(pIemCpu)205 206 /**207 * Call a common opcode decoder function taking one extra argument.208 *209 * We're using macros for this so that adding and removing parameters can be210 * done as we please. See FNIEMOP_DEF_1.211 */212 #define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)213 214 /**215 * Call a common opcode decoder function taking two extra arguments.216 *217 * We're using macros for this so that adding and removing parameters can be218 * done as we please. See FNIEMOP_DEF_2.219 */220 #define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)221 222 /**223 * Check if we're currently executing in real or virtual 8086 mode.224 *225 * @returns @c true if it is, @c false if not.226 * @param a_pIemCpu The IEM state of the current CPU.227 */228 #define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))229 230 /**231 * Check if we're currently executing in long mode.232 *233 * @returns @c true if it is, @c false if not.234 * @param a_pIemCpu The IEM state of the current CPU.235 */236 #define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))237 238 /**239 * Check if we're currently executing in real mode.240 *241 * @returns @c true if it is, @c false if not.242 * @param a_pIemCpu The IEM state of the current CPU.243 */244 #define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))245 246 /**247 * Tests if an AMD CPUID feature (extended) is marked present - ECX.248 */249 #define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))250 251 /**252 * Check if the address is canonical.253 */254 #define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))255 256 257 /*******************************************************************************258 * Global Variables *259 *******************************************************************************/260 extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */261 262 263 /** Function table for the ADD instruction. 
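 *
 * An illustrative fragment, not code from this file (IEM_OP_PRF_LOCK is
 * assumed here to be the LOCK prefix flag from IEMInternal.h, which is not
 * visible in this excerpt): group-1 decoders reach this table through
 * g_apIemImplGrp1 below and then select the normal or the locked worker,
 * roughly
 * @code
 *      PFNIEMAIMPLBINU32 pfn = (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
 *                            ? g_iemAImpl_add.pfnLockedU32
 *                            : g_iemAImpl_add.pfnNormalU32;
 * @endcode
 * which is why each entry below carries a normal/locked pair per width.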
*/264 static const IEMOPBINSIZES g_iemAImpl_add =265 {266 iemAImpl_add_u8, iemAImpl_add_u8_locked,267 iemAImpl_add_u16, iemAImpl_add_u16_locked,268 iemAImpl_add_u32, iemAImpl_add_u32_locked,269 iemAImpl_add_u64, iemAImpl_add_u64_locked270 };271 272 /** Function table for the ADC instruction. */273 static const IEMOPBINSIZES g_iemAImpl_adc =274 {275 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,276 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,277 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,278 iemAImpl_adc_u64, iemAImpl_adc_u64_locked279 };280 281 /** Function table for the SUB instruction. */282 static const IEMOPBINSIZES g_iemAImpl_sub =283 {284 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,285 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,286 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,287 iemAImpl_sub_u64, iemAImpl_sub_u64_locked288 };289 290 /** Function table for the SBB instruction. */291 static const IEMOPBINSIZES g_iemAImpl_sbb =292 {293 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,294 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,295 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,296 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked297 };298 299 /** Function table for the OR instruction. */300 static const IEMOPBINSIZES g_iemAImpl_or =301 {302 iemAImpl_or_u8, iemAImpl_or_u8_locked,303 iemAImpl_or_u16, iemAImpl_or_u16_locked,304 iemAImpl_or_u32, iemAImpl_or_u32_locked,305 iemAImpl_or_u64, iemAImpl_or_u64_locked306 };307 308 /** Function table for the XOR instruction. */309 static const IEMOPBINSIZES g_iemAImpl_xor =310 {311 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,312 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,313 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,314 iemAImpl_xor_u64, iemAImpl_xor_u64_locked315 };316 317 /** Function table for the AND instruction. */318 static const IEMOPBINSIZES g_iemAImpl_and =319 {320 iemAImpl_and_u8, iemAImpl_and_u8_locked,321 iemAImpl_and_u16, iemAImpl_and_u16_locked,322 iemAImpl_and_u32, iemAImpl_and_u32_locked,323 iemAImpl_and_u64, iemAImpl_and_u64_locked324 };325 326 /** Function table for the CMP instruction.327 * @remarks Making operand order ASSUMPTIONS.328 */329 static const IEMOPBINSIZES g_iemAImpl_cmp =330 {331 iemAImpl_cmp_u8, NULL,332 iemAImpl_cmp_u16, NULL,333 iemAImpl_cmp_u32, NULL,334 iemAImpl_cmp_u64, NULL335 };336 337 /** Function table for the TEST instruction.338 * @remarks Making operand order ASSUMPTIONS.339 */340 static const IEMOPBINSIZES g_iemAImpl_test =341 {342 iemAImpl_test_u8, NULL,343 iemAImpl_test_u16, NULL,344 iemAImpl_test_u32, NULL,345 iemAImpl_test_u64, NULL346 };347 348 /** Function table for the IMUL instruction. */349 static const IEMOPBINSIZES g_iemAImpl_imul_two =350 {351 NULL, NULL,352 iemAImpl_imul_two_u16, NULL,353 iemAImpl_imul_two_u32, NULL,354 iemAImpl_imul_two_u64, NULL355 };356 357 /** Group 1 /r lookup table. */358 static const PCIEMOPBINSIZES g_apIemImplGrp1[8] =359 {360 &g_iemAImpl_add,361 &g_iemAImpl_or,362 &g_iemAImpl_adc,363 &g_iemAImpl_sbb,364 &g_iemAImpl_and,365 &g_iemAImpl_sub,366 &g_iemAImpl_xor,367 &g_iemAImpl_cmp368 };369 370 /** Function table for the INC instruction. */371 static const IEMOPUNARYSIZES g_iemAImpl_inc =372 {373 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,374 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,375 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,376 iemAImpl_inc_u64, iemAImpl_inc_u64_locked377 };378 379 /** Function table for the DEC instruction. 
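 *
 * A sketch rather than code from this file: INC and DEC share the
 * IEMOPUNARYSIZES layout, so a single decoder body can serve both group 4/5
 * encodings simply by swapping table pointers, e.g.
 * @code
 *      // bRm = the ModR/M byte (reg field: /0 = INC, /1 = DEC)
 *      PCIEMOPUNARYSIZES const pImpl = ((bRm >> 3) & 7) == 0
 *                                    ? &g_iemAImpl_inc
 *                                    : &g_iemAImpl_dec;
 * @endcode
 * after which the width and lock-prefix selection proceeds as for the
 * binary-operator tables.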
*/380 static const IEMOPUNARYSIZES g_iemAImpl_dec =381 {382 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,383 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,384 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,385 iemAImpl_dec_u64, iemAImpl_dec_u64_locked386 };387 388 /** Function table for the NEG instruction. */389 static const IEMOPUNARYSIZES g_iemAImpl_neg =390 {391 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,392 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,393 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,394 iemAImpl_neg_u64, iemAImpl_neg_u64_locked395 };396 397 /** Function table for the NOT instruction. */398 static const IEMOPUNARYSIZES g_iemAImpl_not =399 {400 iemAImpl_not_u8, iemAImpl_not_u8_locked,401 iemAImpl_not_u16, iemAImpl_not_u16_locked,402 iemAImpl_not_u32, iemAImpl_not_u32_locked,403 iemAImpl_not_u64, iemAImpl_not_u64_locked404 };405 406 407 /** Function table for the ROL instruction. */408 static const IEMOPSHIFTSIZES g_iemAImpl_rol =409 {410 iemAImpl_rol_u8,411 iemAImpl_rol_u16,412 iemAImpl_rol_u32,413 iemAImpl_rol_u64414 };415 416 /** Function table for the ROR instruction. */417 static const IEMOPSHIFTSIZES g_iemAImpl_ror =418 {419 iemAImpl_ror_u8,420 iemAImpl_ror_u16,421 iemAImpl_ror_u32,422 iemAImpl_ror_u64423 };424 425 /** Function table for the RCL instruction. */426 static const IEMOPSHIFTSIZES g_iemAImpl_rcl =427 {428 iemAImpl_rcl_u8,429 iemAImpl_rcl_u16,430 iemAImpl_rcl_u32,431 iemAImpl_rcl_u64432 };433 434 /** Function table for the RCR instruction. */435 static const IEMOPSHIFTSIZES g_iemAImpl_rcr =436 {437 iemAImpl_rcr_u8,438 iemAImpl_rcr_u16,439 iemAImpl_rcr_u32,440 iemAImpl_rcr_u64441 };442 443 /** Function table for the SHL instruction. */444 static const IEMOPSHIFTSIZES g_iemAImpl_shl =445 {446 iemAImpl_shl_u8,447 iemAImpl_shl_u16,448 iemAImpl_shl_u32,449 iemAImpl_shl_u64450 };451 452 /** Function table for the SHR instruction. */453 static const IEMOPSHIFTSIZES g_iemAImpl_shr =454 {455 iemAImpl_shr_u8,456 iemAImpl_shr_u16,457 iemAImpl_shr_u32,458 iemAImpl_shr_u64459 };460 461 /** Function table for the SAR instruction. */462 static const IEMOPSHIFTSIZES g_iemAImpl_sar =463 {464 iemAImpl_sar_u8,465 iemAImpl_sar_u16,466 iemAImpl_sar_u32,467 iemAImpl_sar_u64468 };469 470 471 /** Function table for the MUL instruction. */472 static const IEMOPMULDIVSIZES g_iemAImpl_mul =473 {474 iemAImpl_mul_u8,475 iemAImpl_mul_u16,476 iemAImpl_mul_u32,477 iemAImpl_mul_u64478 };479 480 /** Function table for the IMUL instruction working implicitly on rAX. */481 static const IEMOPMULDIVSIZES g_iemAImpl_imul =482 {483 iemAImpl_imul_u8,484 iemAImpl_imul_u16,485 iemAImpl_imul_u32,486 iemAImpl_imul_u64487 };488 489 /** Function table for the DIV instruction. */490 static const IEMOPMULDIVSIZES g_iemAImpl_div =491 {492 iemAImpl_div_u8,493 iemAImpl_div_u16,494 iemAImpl_div_u32,495 iemAImpl_div_u64496 };497 498 /** Function table for the IDIV instruction. 
*/499 static const IEMOPMULDIVSIZES g_iemAImpl_idiv =500 {501 iemAImpl_idiv_u8,502 iemAImpl_idiv_u16,503 iemAImpl_idiv_u32,504 iemAImpl_idiv_u64505 };506 507 508 /*******************************************************************************509 * Internal Functions *510 *******************************************************************************/511 static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);512 static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);513 static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);514 static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);515 static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);516 #ifdef IEM_VERIFICATION_MODE517 static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);518 #endif519 static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);520 static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);521 522 523 /**524 * Initializes the decoder state.525 *526 * @param pIemCpu The per CPU IEM state.527 */528 DECLINLINE(void) iemInitDecode(PIEMCPU pIemCpu)529 {530 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);531 532 pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));533 IEMMODE enmMode = CPUMIsGuestIn64BitCodeEx(pCtx)534 ? IEMMODE_64BIT535 : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */536 ? IEMMODE_32BIT537 : IEMMODE_16BIT;538 pIemCpu->enmCpuMode = enmMode;539 pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */540 pIemCpu->enmEffAddrMode = enmMode;541 pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... 
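 *
 * (What the @todo is getting at, sketched here rather than quoted from any
 * source: in 64-bit mode the default address size is 64-bit but most
 * instructions default to a 32-bit operand size, so a stricter
 * initialisation would look like
 * @code
 *      pIemCpu->enmDefAddrMode = enmMode;
 *      pIemCpu->enmDefOpSize   = enmMode != IEMMODE_64BIT ? enmMode : IEMMODE_32BIT;
 * @endcode
 * with the instructions that do default to 64-bit operands, near branches
 * and pushes, handled via iemRecalEffOpSize64Default further down.)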
*/542 pIemCpu->enmEffOpSize = enmMode;543 pIemCpu->fPrefixes = 0;544 pIemCpu->uRexReg = 0;545 pIemCpu->uRexB = 0;546 pIemCpu->uRexIndex = 0;547 pIemCpu->iEffSeg = X86_SREG_DS;548 pIemCpu->offOpcode = 0;549 pIemCpu->cbOpcode = 0;550 pIemCpu->cActiveMappings = 0;551 pIemCpu->iNextMapping = 0;552 }553 554 555 /**556 * Prefetch opcodes the first time, when starting to execute.557 *558 * @returns Strict VBox status code.559 * @param pIemCpu The IEM state.560 */561 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)562 {563 #ifdef IEM_VERIFICATION_MODE564 uint8_t const cbOldOpcodes = pIemCpu->cbOpcode;565 #endif566 iemInitDecode(pIemCpu);567 568 /*569 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.570 *571 * First translate CS:rIP to a physical address.572 */573 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);574 uint32_t cbToTryRead;575 RTGCPTR GCPtrPC;576 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)577 {578 cbToTryRead = PAGE_SIZE;579 GCPtrPC = pCtx->rip;580 if (!IEM_IS_CANONICAL(GCPtrPC))581 return iemRaiseGeneralProtectionFault0(pIemCpu);582 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);583 }584 else585 {586 uint32_t GCPtrPC32 = pCtx->eip;587 Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);588 if (GCPtrPC32 > pCtx->csHid.u32Limit)589 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);590 cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;591 GCPtrPC = pCtx->csHid.u64Base + GCPtrPC32;592 }593 594 RTGCPHYS GCPhys;595 uint64_t fFlags;596 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);597 if (RT_FAILURE(rc))598 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);599 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)600 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);601 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))602 return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);603 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;604 /** @todo Check reserved bits and such stuff. PGM is better at doing605 * that, so do it when implementing the guest virtual address606 * TLB... */607 608 #ifdef IEM_VERIFICATION_MODE609 /*610 * Optimistic optimization: Use unconsumed opcode bytes from the previous611 * instruction.612 */613 /** @todo optimize this differently by not using PGMPhysRead. 
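 *
 * (Worked example of the page clamping above, purely illustrative: with
 * 4 KiB pages and
 * @code
 *      GCPtrPC     = 0x00401ffd
 *      cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK)
 *                  = 0x1000 - 0xffd = 3
 * @endcode
 * only three opcode bytes may be read before crossing into the next page,
 * which may be mapped to a different physical page or not mapped at all.)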
*/614 RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;615 pIemCpu->GCPhysOpcodes = GCPhys;616 if (offPrevOpcodes < cbOldOpcodes)617 {618 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;619 memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);620 pIemCpu->cbOpcode = cbNew;621 return VINF_SUCCESS;622 }623 #endif624 625 /*626 * Read the bytes at this address.627 */628 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);629 if (cbToTryRead > cbLeftOnPage)630 cbToTryRead = cbLeftOnPage;631 if (cbToTryRead > sizeof(pIemCpu->abOpcode))632 cbToTryRead = sizeof(pIemCpu->abOpcode);633 /** @todo patch manager */634 if (!pIemCpu->fByPassHandlers)635 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);636 else637 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);638 if (rc != VINF_SUCCESS)639 return rc;640 pIemCpu->cbOpcode = cbToTryRead;641 642 return VINF_SUCCESS;643 }644 645 646 /**647 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate648 * exception if this fails.649 *650 * @returns Strict VBox status code.651 * @param pIemCpu The IEM state.652 * @param cbMin The minimum number of opcode bytes to fetch.653 */654 static VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)655 {656 /*657 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.658 *659 * First translate CS:rIP to a physical address.660 */661 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);662 uint32_t cbToTryRead;663 RTGCPTR GCPtrNext;664 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)665 {666 cbToTryRead = PAGE_SIZE;667 GCPtrNext = pCtx->rip + pIemCpu->cbOpcode;668 if (!IEM_IS_CANONICAL(GCPtrNext))669 return iemRaiseGeneralProtectionFault0(pIemCpu);670 cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);671 Assert(cbToTryRead >= cbMin); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */672 }673 else674 {675 uint32_t GCPtrNext32 = pCtx->eip;676 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);677 GCPtrNext32 += pIemCpu->cbOpcode;678 if (GCPtrNext32 > pCtx->csHid.u32Limit)679 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);680 cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;681 if (cbToTryRead < cbMin)682 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);683 GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;684 }685 686 RTGCPHYS GCPhys;687 uint64_t fFlags;688 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);689 if (RT_FAILURE(rc))690 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);691 if (!(fFlags & X86_PTE_US) && pIemCpu->uCpl == 3)692 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);693 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))694 return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);695 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;696 /** @todo Check reserved bits and such stuff. PGM is better at doing697 * that, so do it when implementing the guest virtual address698 * TLB... 
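 *
 * (Similarly, a worked example of the segment-limit clamp above, not taken
 * from the source: with csHid.u32Limit = 0xffff and GCPtrNext32 = 0xfffe,
 * @code
 *      cbToTryRead = 0xffff - 0xfffe + 1 = 2
 * @endcode
 * i.e. exactly the two bytes up to and including the limit are still
 * fetchable; if the caller needs more than that (cbMin > 2), the code above
 * raises \#GP through iemRaiseSelectorBounds.)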
*/699 700 /*701 * Read the bytes at this address.702 */703 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);704 if (cbToTryRead > cbLeftOnPage)705 cbToTryRead = cbLeftOnPage;706 if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)707 cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;708 if (!pIemCpu->fByPassHandlers)709 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);710 else711 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);712 if (rc != VINF_SUCCESS)713 return rc;714 pIemCpu->cbOpcode += cbToTryRead;715 716 return VINF_SUCCESS;717 }718 719 720 /**721 * Deals with the problematic cases that iemOpcodeGetNextByte doesn't like.722 *723 * @returns Strict VBox status code.724 * @param pIemCpu The IEM state.725 * @param pb Where to return the opcode byte.726 */727 static VBOXSTRICTRC iemOpcodeGetNextByteSlow(PIEMCPU pIemCpu, uint8_t *pb)728 {729 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);730 if (rcStrict == VINF_SUCCESS)731 {732 uint8_t offOpcode = pIemCpu->offOpcode;733 *pb = pIemCpu->abOpcode[offOpcode];734 pIemCpu->offOpcode = offOpcode + 1;735 }736 else737 *pb = 0;738 return rcStrict;739 }740 741 742 /**743 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.744 *745 * @returns Strict VBox status code.746 * @param pIemCpu The IEM state.747 * @param pu16 Where to return the opcode dword.748 */749 static VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)750 {751 uint8_t u8;752 VBOXSTRICTRC rcStrict = iemOpcodeGetNextByteSlow(pIemCpu, &u8);753 if (rcStrict == VINF_SUCCESS)754 *pu16 = (int8_t)u8;755 return rcStrict;756 }757 758 759 /**760 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.761 *762 * @returns Strict VBox status code.763 * @param pIemCpu The IEM state.764 * @param pu16 Where to return the opcode word.765 */766 static VBOXSTRICTRC iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)767 {768 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);769 if (rcStrict == VINF_SUCCESS)770 {771 uint8_t offOpcode = pIemCpu->offOpcode;772 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);773 pIemCpu->offOpcode = offOpcode + 2;774 }775 else776 *pu16 = 0;777 return rcStrict;778 }779 780 781 /**782 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.783 *784 * @returns Strict VBox status code.785 * @param pIemCpu The IEM state.786 * @param pu32 Where to return the opcode dword.787 */788 static VBOXSTRICTRC iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)789 {790 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);791 if (rcStrict == VINF_SUCCESS)792 {793 uint8_t offOpcode = pIemCpu->offOpcode;794 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],795 pIemCpu->abOpcode[offOpcode + 1],796 pIemCpu->abOpcode[offOpcode + 2],797 pIemCpu->abOpcode[offOpcode + 3]);798 pIemCpu->offOpcode = offOpcode + 4;799 }800 else801 *pu32 = 0;802 return rcStrict;803 }804 805 806 /**807 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.808 *809 * @returns Strict VBox status code.810 * @param pIemCpu The IEM state.811 * @param pu64 Where to return the opcode qword.812 */813 static VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)814 {815 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);816 if (rcStrict == VINF_SUCCESS)817 {818 
uint8_t offOpcode = pIemCpu->offOpcode;819 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],820 pIemCpu->abOpcode[offOpcode + 1],821 pIemCpu->abOpcode[offOpcode + 2],822 pIemCpu->abOpcode[offOpcode + 3]);823 pIemCpu->offOpcode = offOpcode + 4;824 }825 else826 *pu64 = 0;827 return rcStrict;828 }829 830 831 /**832 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.833 *834 * @returns Strict VBox status code.835 * @param pIemCpu The IEM state.836 * @param pu64 Where to return the opcode qword.837 */838 static VBOXSTRICTRC iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)839 {840 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);841 if (rcStrict == VINF_SUCCESS)842 {843 uint8_t offOpcode = pIemCpu->offOpcode;844 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],845 pIemCpu->abOpcode[offOpcode + 1],846 pIemCpu->abOpcode[offOpcode + 2],847 pIemCpu->abOpcode[offOpcode + 3],848 pIemCpu->abOpcode[offOpcode + 4],849 pIemCpu->abOpcode[offOpcode + 5],850 pIemCpu->abOpcode[offOpcode + 6],851 pIemCpu->abOpcode[offOpcode + 7]);852 pIemCpu->offOpcode = offOpcode + 8;853 }854 else855 *pu64 = 0;856 return rcStrict;857 }858 859 860 /**861 * Fetches the next opcode byte.862 *863 * @returns Strict VBox status code.864 * @param pIemCpu The IEM state.865 * @param pu8 Where to return the opcode byte.866 */867 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)868 {869 uint8_t const offOpcode = pIemCpu->offOpcode;870 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))871 return iemOpcodeGetNextByteSlow(pIemCpu, pu8);872 873 *pu8 = pIemCpu->abOpcode[offOpcode];874 pIemCpu->offOpcode = offOpcode + 1;875 return VINF_SUCCESS;876 }877 878 /**879 * Fetches the next opcode byte, returns automatically on failure.880 *881 * @param pIemCpu The IEM state.882 * @param a_pu8 Where to return the opcode byte.883 */884 #define IEM_OPCODE_GET_NEXT_BYTE(a_pIemCpu, a_pu8) \885 do \886 { \887 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8((a_pIemCpu), (a_pu8)); \888 if (rcStrict2 != VINF_SUCCESS) \889 return rcStrict2; \890 } while (0)891 892 893 /**894 * Fetches the next signed byte from the opcode stream.895 *896 * @returns Strict VBox status code.897 * @param pIemCpu The IEM state.898 * @param pi8 Where to return the signed byte.899 */900 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)901 {902 return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);903 }904 905 /**906 * Fetches the next signed byte from the opcode stream, returning automatically907 * on failure.908 *909 * @param pIemCpu The IEM state.910 * @param pi8 Where to return the signed byte.911 */912 #define IEM_OPCODE_GET_NEXT_S8(a_pIemCpu, a_pi8) \913 do \914 { \915 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8((a_pIemCpu), (a_pi8)); \916 if (rcStrict2 != VINF_SUCCESS) \917 return rcStrict2; \918 } while (0)919 920 921 /**922 * Fetches the next signed byte from the opcode stream, extending it to923 * unsigned 16-bit.924 *925 * @returns Strict VBox status code.926 * @param pIemCpu The IEM state.927 * @param pu16 Where to return the unsigned word.928 */929 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)930 {931 uint8_t const offOpcode = pIemCpu->offOpcode;932 if (RT_UNLIKELY(offOpcode >= pIemCpu->cbOpcode))933 return iemOpcodeGetNextS8SxU16Slow(pIemCpu, pu16);934 935 *pu16 = (int8_t)pIemCpu->abOpcode[offOpcode];936 pIemCpu->offOpcode = offOpcode + 1;937 return VINF_SUCCESS;938 }939 940 941 /**942 * Fetches the next signed byte from the opcode 
stream and sign-extending it to943 * a word, returning automatically on failure.944 *945 * @param pIemCpu The IEM state.946 * @param pu16 Where to return the word.947 */948 #define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pIemCpu, a_pu16) \949 do \950 { \951 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16((a_pIemCpu), (a_pu16)); \952 if (rcStrict2 != VINF_SUCCESS) \953 return rcStrict2; \954 } while (0)955 956 957 /**958 * Fetches the next opcode word.959 *960 * @returns Strict VBox status code.961 * @param pIemCpu The IEM state.962 * @param pu16 Where to return the opcode word.963 */964 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)965 {966 uint8_t const offOpcode = pIemCpu->offOpcode;967 if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))968 return iemOpcodeGetNextU16Slow(pIemCpu, pu16);969 970 *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);971 pIemCpu->offOpcode = offOpcode + 2;972 return VINF_SUCCESS;973 }974 975 /**976 * Fetches the next opcode word, returns automatically on failure.977 *978 * @param pIemCpu The IEM state.979 * @param a_pu16 Where to return the opcode word.980 */981 #define IEM_OPCODE_GET_NEXT_U16(a_pIemCpu, a_pu16) \982 do \983 { \984 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16((a_pIemCpu), (a_pu16)); \985 if (rcStrict2 != VINF_SUCCESS) \986 return rcStrict2; \987 } while (0)988 989 990 /**991 * Fetches the next opcode dword.992 *993 * @returns Strict VBox status code.994 * @param pIemCpu The IEM state.995 * @param pu32 Where to return the opcode double word.996 */997 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)998 {999 uint8_t const offOpcode = pIemCpu->offOpcode;1000 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))1001 return iemOpcodeGetNextU32Slow(pIemCpu, pu32);1002 1003 *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],1004 pIemCpu->abOpcode[offOpcode + 1],1005 pIemCpu->abOpcode[offOpcode + 2],1006 pIemCpu->abOpcode[offOpcode + 3]);1007 pIemCpu->offOpcode = offOpcode + 4;1008 return VINF_SUCCESS;1009 }1010 1011 /**1012 * Fetches the next opcode dword, returns automatically on failure.1013 *1014 * @param pIemCpu The IEM state.1015 * @param a_u32 Where to return the opcode dword.1016 */1017 #define IEM_OPCODE_GET_NEXT_U32(a_pIemCpu, a_pu32) \1018 do \1019 { \1020 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32((a_pIemCpu), (a_pu32)); \1021 if (rcStrict2 != VINF_SUCCESS) \1022 return rcStrict2; \1023 } while (0)1024 1025 1026 /**1027 * Fetches the next opcode dword, sign extending it into a quad word.1028 *1029 * @returns Strict VBox status code.1030 * @param pIemCpu The IEM state.1031 * @param pu64 Where to return the opcode quad word.1032 */1033 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)1034 {1035 uint8_t const offOpcode = pIemCpu->offOpcode;1036 if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))1037 return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);1038 1039 int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],1040 pIemCpu->abOpcode[offOpcode + 1],1041 pIemCpu->abOpcode[offOpcode + 2],1042 pIemCpu->abOpcode[offOpcode + 3]);1043 *pu64 = i32;1044 pIemCpu->offOpcode = offOpcode + 4;1045 return VINF_SUCCESS;1046 }1047 1048 /**1049 * Fetches the next opcode double word and sign extends it to a quad word,1050 * returns automatically on failure.1051 *1052 * @param pIemCpu The IEM state.1053 * @param a_pu64 Where to return the opcode quad word.1054 */1055 #define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pIemCpu, a_pu64) \1056 
do \1057 { \1058 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64((a_pIemCpu), (a_pu64)); \1059 if (rcStrict2 != VINF_SUCCESS) \1060 return rcStrict2; \1061 } while (0)1062 1063 1064 /**1065 * Fetches the next opcode qword.1066 *1067 * @returns Strict VBox status code.1068 * @param pIemCpu The IEM state.1069 * @param pu64 Where to return the opcode qword.1070 */1071 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)1072 {1073 uint8_t const offOpcode = pIemCpu->offOpcode;1074 if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))1075 return iemOpcodeGetNextU64Slow(pIemCpu, pu64);1076 1077 *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],1078 pIemCpu->abOpcode[offOpcode + 1],1079 pIemCpu->abOpcode[offOpcode + 2],1080 pIemCpu->abOpcode[offOpcode + 3],1081 pIemCpu->abOpcode[offOpcode + 4],1082 pIemCpu->abOpcode[offOpcode + 5],1083 pIemCpu->abOpcode[offOpcode + 6],1084 pIemCpu->abOpcode[offOpcode + 7]);1085 pIemCpu->offOpcode = offOpcode + 8;1086 return VINF_SUCCESS;1087 }1088 1089 /**1090 * Fetches the next opcode qword, returns automatically on failure.1091 *1092 * @param pIemCpu The IEM state.1093 * @param a_pu64 Where to return the opcode qword.1094 */1095 #define IEM_OPCODE_GET_NEXT_U64(a_pIemCpu, a_pu64) \1096 do \1097 { \1098 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64((a_pIemCpu), (a_pu64)); \1099 if (rcStrict2 != VINF_SUCCESS) \1100 return rcStrict2; \1101 } while (0)1102 1103 1104 /** @name Raising Exceptions.1105 *1106 * @{1107 */1108 1109 static VBOXSTRICTRC iemRaiseDivideError(PIEMCPU pIemCpu)1110 {1111 AssertFailed(/** @todo implement this */);1112 return VERR_NOT_IMPLEMENTED;1113 }1114 1115 1116 static VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr)1117 {1118 AssertFailed(/** @todo implement this */);1119 return VERR_NOT_IMPLEMENTED;1120 }1121 1122 1123 static VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu)1124 {1125 AssertFailed(/** @todo implement this */);1126 return VERR_NOT_IMPLEMENTED;1127 }1128 1129 1130 static VBOXSTRICTRC iemRaiseNotCanonical(PIEMCPU pIemCpu)1131 {1132 AssertFailed(/** @todo implement this */);1133 return VERR_NOT_IMPLEMENTED;1134 }1135 1136 1137 static VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)1138 {1139 AssertFailed(/** @todo implement this */);1140 return VERR_NOT_IMPLEMENTED;1141 }1142 1143 1144 static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)1145 {1146 AssertFailed(/** @todo implement this */);1147 return VERR_NOT_IMPLEMENTED;1148 }1149 1150 1151 static VBOXSTRICTRC iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)1152 {1153 AssertFailed(/** @todo implement this */);1154 return VERR_NOT_IMPLEMENTED;1155 }1156 1157 1158 static VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)1159 {1160 AssertFailed(/** @todo implement this */);1161 return VERR_NOT_IMPLEMENTED;1162 }1163 1164 1165 static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)1166 {1167 AssertFailed(/** @todo implement this */);1168 return VERR_NOT_IMPLEMENTED;1169 }1170 1171 1172 /**1173 * Macro for calling iemCImplRaiseInvalidLockPrefix().1174 *1175 * This enables us to add/remove arguments and force different levels of1176 * inlining as we wish.1177 *1178 * @return Strict VBox status code.1179 */1180 #define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)1181 
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)1182 {1183 AssertFailed();1184 return VERR_NOT_IMPLEMENTED;1185 }1186 1187 1188 /**1189 * Macro for calling iemCImplRaiseInvalidOpcode().1190 *1191 * This enables us to add/remove arguments and force different levels of1192 * inlining as we wish.1193 *1194 * @return Strict VBox status code.1195 */1196 #define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)1197 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)1198 {1199 AssertFailed();1200 return VERR_NOT_IMPLEMENTED;1201 }1202 1203 1204 /** @} */1205 1206 1207 /*1208 *1209 * Helpers routines.1210 * Helpers routines.1211 * Helpers routines.1212 *1213 */1214 1215 /**1216 * Recalculates the effective operand size.1217 *1218 * @param pIemCpu The IEM state.1219 */1220 static void iemRecalEffOpSize(PIEMCPU pIemCpu)1221 {1222 switch (pIemCpu->enmCpuMode)1223 {1224 case IEMMODE_16BIT:1225 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;1226 break;1227 case IEMMODE_32BIT:1228 pIemCpu->enmEffOpSize = pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;1229 break;1230 case IEMMODE_64BIT:1231 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))1232 {1233 case 0:1234 pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize;1235 break;1236 case IEM_OP_PRF_SIZE_OP:1237 pIemCpu->enmEffOpSize = IEMMODE_16BIT;1238 break;1239 case IEM_OP_PRF_SIZE_REX_W:1240 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:1241 pIemCpu->enmEffOpSize = IEMMODE_64BIT;1242 break;1243 }1244 break;1245 default:1246 AssertFailed();1247 }1248 }1249 1250 1251 /**1252 * Sets the default operand size to 64-bit and recalculates the effective1253 * operand size.1254 *1255 * @param pIemCpu The IEM state.1256 */1257 static void iemRecalEffOpSize64Default(PIEMCPU pIemCpu)1258 {1259 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);1260 pIemCpu->enmDefOpSize = IEMMODE_64BIT;1261 if ((pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)1262 pIemCpu->enmEffOpSize = IEMMODE_64BIT;1263 else1264 pIemCpu->enmEffOpSize = IEMMODE_16BIT;1265 }1266 1267 1268 /*1269 *1270 * Common opcode decoders.1271 * Common opcode decoders.1272 * Common opcode decoders.1273 *1274 */1275 #include <iprt/mem.h>1276 1277 /**1278 * Used to add extra details about a stub case.1279 * @param pIemCpu The IEM per CPU state.1280 */1281 static void iemOpStubMsg2(PIEMCPU pIemCpu)1282 {1283 PVM pVM = IEMCPU_TO_VM(pIemCpu);1284 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);1285 char szRegs[4096];1286 DBGFR3RegPrintf(pVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),1287 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"1288 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"1289 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"1290 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"1291 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"1292 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"1293 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"1294 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"1295 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"1296 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"1297 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} 
flags=%04VR{ss_attr}}\n"1298 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"1299 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"1300 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"1301 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"1302 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"1303 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"1304 " efer=%016VR{efer}\n"1305 " pat=%016VR{pat}\n"1306 " sf_mask=%016VR{sf_mask}\n"1307 "krnl_gs_base=%016VR{krnl_gs_base}\n"1308 " lstar=%016VR{lstar}\n"1309 " star=%016VR{star} cstar=%016VR{cstar}\n"1310 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"1311 );1312 1313 char szInstr[256];1314 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, 0, 0,1315 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,1316 szInstr, sizeof(szInstr), NULL);1317 1318 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);1319 }1320 1321 1322 /** Stubs an opcode. */1323 #define FNIEMOP_STUB(a_Name) \1324 FNIEMOP_DEF(a_Name) \1325 { \1326 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \1327 iemOpStubMsg2(pIemCpu); \1328 RTAssertPanic(); \1329 return VERR_NOT_IMPLEMENTED; \1330 } \1331 typedef int ignore_semicolon1332 1333 /** Stubs an opcode. */1334 #define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \1335 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \1336 { \1337 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \1338 iemOpStubMsg2(pIemCpu); \1339 RTAssertPanic(); \1340 return VERR_NOT_IMPLEMENTED; \1341 } \1342 typedef int ignore_semicolon1343 1344 1345 1346 /** @name Register Access.1347 * @{1348 */1349 1350 /**1351 * Gets a reference (pointer) to the specified hidden segment register.1352 *1353 * @returns Hidden register reference.1354 * @param pIemCpu The per CPU data.1355 * @param iSegReg The segment register.1356 */1357 static PCPUMSELREGHID iemSRegGetHid(PIEMCPU pIemCpu, uint8_t iSegReg)1358 {1359 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1360 switch (iSegReg)1361 {1362 case X86_SREG_ES: return &pCtx->esHid;1363 case X86_SREG_CS: return &pCtx->csHid;1364 case X86_SREG_SS: return &pCtx->ssHid;1365 case X86_SREG_DS: return &pCtx->dsHid;1366 case X86_SREG_FS: return &pCtx->fsHid;1367 case X86_SREG_GS: return &pCtx->gsHid;1368 }1369 AssertFailedReturn(NULL);1370 }1371 1372 1373 /**1374 * Gets a reference (pointer) to the specified segment register (the selector1375 * value).1376 *1377 * @returns Pointer to the selector variable.1378 * @param pIemCpu The per CPU data.1379 * @param iSegReg The segment register.1380 */1381 static uint16_t *iemSRegRef(PIEMCPU pIemCpu, uint8_t iSegReg)1382 {1383 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1384 switch (iSegReg)1385 {1386 case X86_SREG_ES: return &pCtx->es;1387 case X86_SREG_CS: return &pCtx->cs;1388 case X86_SREG_SS: return &pCtx->ss;1389 case X86_SREG_DS: return &pCtx->ds;1390 case X86_SREG_FS: return &pCtx->fs;1391 case X86_SREG_GS: return &pCtx->gs;1392 }1393 AssertFailedReturn(NULL);1394 }1395 1396 1397 /**1398 * Fetches the selector value of a segment register.1399 *1400 * @returns The selector value.1401 * @param pIemCpu The per CPU data.1402 * @param iSegReg The segment register.1403 */1404 static uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg)1405 {1406 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1407 switch (iSegReg)1408 {1409 case X86_SREG_ES: return pCtx->es;1410 case X86_SREG_CS: return pCtx->cs;1411 case 
X86_SREG_SS: return pCtx->ss;1412 case X86_SREG_DS: return pCtx->ds;1413 case X86_SREG_FS: return pCtx->fs;1414 case X86_SREG_GS: return pCtx->gs;1415 }1416 AssertFailedReturn(0xffff);1417 }1418 1419 1420 /**1421 * Gets a reference (pointer) to the specified general register.1422 *1423 * @returns Register reference.1424 * @param pIemCpu The per CPU data.1425 * @param iReg The general register.1426 */1427 static void *iemGRegRef(PIEMCPU pIemCpu, uint8_t iReg)1428 {1429 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1430 switch (iReg)1431 {1432 case X86_GREG_xAX: return &pCtx->rax;1433 case X86_GREG_xCX: return &pCtx->rcx;1434 case X86_GREG_xDX: return &pCtx->rdx;1435 case X86_GREG_xBX: return &pCtx->rbx;1436 case X86_GREG_xSP: return &pCtx->rsp;1437 case X86_GREG_xBP: return &pCtx->rbp;1438 case X86_GREG_xSI: return &pCtx->rsi;1439 case X86_GREG_xDI: return &pCtx->rdi;1440 case X86_GREG_x8: return &pCtx->r8;1441 case X86_GREG_x9: return &pCtx->r9;1442 case X86_GREG_x10: return &pCtx->r10;1443 case X86_GREG_x11: return &pCtx->r11;1444 case X86_GREG_x12: return &pCtx->r12;1445 case X86_GREG_x13: return &pCtx->r13;1446 case X86_GREG_x14: return &pCtx->r14;1447 case X86_GREG_x15: return &pCtx->r15;1448 }1449 AssertFailedReturn(NULL);1450 }1451 1452 1453 /**1454 * Gets a reference (pointer) to the specified 8-bit general register.1455 *1456 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.1457 *1458 * @returns Register reference.1459 * @param pIemCpu The per CPU data.1460 * @param iReg The register.1461 */1462 static uint8_t *iemGRegRefU8(PIEMCPU pIemCpu, uint8_t iReg)1463 {1464 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX)1465 return (uint8_t *)iemGRegRef(pIemCpu, iReg);1466 1467 uint8_t *pu8Reg = (uint8_t *)iemGRegRef(pIemCpu, iReg & 3);1468 if (iReg >= 4)1469 pu8Reg++;1470 return pu8Reg;1471 }1472 1473 1474 /**1475 * Fetches the value of an 8-bit general register.1476 *1477 * @returns The register value.1478 * @param pIemCpu The per CPU data.1479 * @param iReg The register.1480 */1481 static uint8_t iemGRegFetchU8(PIEMCPU pIemCpu, uint8_t iReg)1482 {1483 uint8_t const *pbSrc = iemGRegRefU8(pIemCpu, iReg);1484 return *pbSrc;1485 }1486 1487 1488 /**1489 * Fetches the value of a 16-bit general register.1490 *1491 * @returns The register value.1492 * @param pIemCpu The per CPU data.1493 * @param iReg The register.1494 */1495 static uint16_t iemGRegFetchU16(PIEMCPU pIemCpu, uint8_t iReg)1496 {1497 return *(uint16_t *)iemGRegRef(pIemCpu, iReg);1498 }1499 1500 1501 /**1502 * Fetches the value of a 32-bit general register.1503 *1504 * @returns The register value.1505 * @param pIemCpu The per CPU data.1506 * @param iReg The register.1507 */1508 static uint32_t iemGRegFetchU32(PIEMCPU pIemCpu, uint8_t iReg)1509 {1510 return *(uint32_t *)iemGRegRef(pIemCpu, iReg);1511 }1512 1513 1514 /**1515 * Fetches the value of a 64-bit general register.1516 *1517 * @returns The register value.1518 * @param pIemCpu The per CPU data.1519 * @param iReg The register.1520 */1521 static uint64_t iemGRegFetchU64(PIEMCPU pIemCpu, uint8_t iReg)1522 {1523 return *(uint64_t *)iemGRegRef(pIemCpu, iReg);1524 }1525 1526 1527 /**1528 * Adds an 8-bit signed jump offset to RIP/EIP/IP.1529 *1530 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code1531 * segment limit.1532 *1533 * @param pIemCpu The per CPU data.1534 * @param offNextInstr The offset of the next instruction.1535 */1536 static VBOXSTRICTRC iemRegRipRelativeJumpS8(PIEMCPU pIemCpu, int8_t offNextInstr)1537 {1538 PCPUMCTX pCtx = 
pIemCpu->CTX_SUFF(pCtx);1539 switch (pIemCpu->enmEffOpSize)1540 {1541 case IEMMODE_16BIT:1542 {1543 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;1544 if ( uNewIp > pCtx->csHid.u32Limit1545 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */1546 return iemRaiseGeneralProtectionFault0(pIemCpu);1547 pCtx->rip = uNewIp;1548 break;1549 }1550 1551 case IEMMODE_32BIT:1552 {1553 Assert(pCtx->rip <= UINT32_MAX);1554 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);1555 1556 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;1557 if (uNewEip > pCtx->csHid.u32Limit)1558 return iemRaiseGeneralProtectionFault0(pIemCpu);1559 pCtx->rip = uNewEip;1560 break;1561 }1562 1563 case IEMMODE_64BIT:1564 {1565 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);1566 1567 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;1568 if (!IEM_IS_CANONICAL(uNewRip))1569 return iemRaiseGeneralProtectionFault0(pIemCpu);1570 pCtx->rip = uNewRip;1571 break;1572 }1573 1574 IEM_NOT_REACHED_DEFAULT_CASE_RET();1575 }1576 1577 return VINF_SUCCESS;1578 }1579 1580 1581 /**1582 * Adds a 16-bit signed jump offset to RIP/EIP/IP.1583 *1584 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code1585 * segment limit.1586 *1587 * @returns Strict VBox status code.1588 * @param pIemCpu The per CPU data.1589 * @param offNextInstr The offset of the next instruction.1590 */1591 static VBOXSTRICTRC iemRegRipRelativeJumpS16(PIEMCPU pIemCpu, int16_t offNextInstr)1592 {1593 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1594 Assert(pIemCpu->enmEffOpSize == IEMMODE_16BIT);1595 1596 uint16_t uNewIp = pCtx->ip + offNextInstr + pIemCpu->offOpcode;1597 if ( uNewIp > pCtx->csHid.u32Limit1598 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */1599 return iemRaiseGeneralProtectionFault0(pIemCpu);1600 /** @todo Test 16-bit jump in 64-bit mode. 
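 *
 * (Worked example of the uNewIp arithmetic above, illustrative only: a
 * 3-byte JMP rel16 (opcode E9) at ip = 0xfff0 with offNextInstr = 0x20 has
 * offOpcode = 3, so
 * @code
 *      uNewIp = (uint16_t)(0xfff0 + 0x20 + 3) = 0x0013   // wraps modulo 64K
 * @endcode
 * and the wrapped result is then checked against csHid.u32Limit; with the
 * usual real-mode limit of 0xffff the wrap is accepted, matching real CPU
 * behaviour.)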
*/1601 pCtx->rip = uNewIp;1602 1603 return VINF_SUCCESS;1604 }1605 1606 1607 /**1608 * Adds a 32-bit signed jump offset to RIP/EIP/IP.1609 *1610 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code1611 * segment limit.1612 *1613 * @returns Strict VBox status code.1614 * @param pIemCpu The per CPU data.1615 * @param offNextInstr The offset of the next instruction.1616 */1617 static VBOXSTRICTRC iemRegRipRelativeJumpS32(PIEMCPU pIemCpu, int32_t offNextInstr)1618 {1619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1620 Assert(pIemCpu->enmEffOpSize != IEMMODE_16BIT);1621 1622 if (pIemCpu->enmEffOpSize == IEMMODE_32BIT)1623 {1624 Assert(pCtx->rip <= UINT32_MAX); Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);1625 1626 uint32_t uNewEip = pCtx->eip + offNextInstr + pIemCpu->offOpcode;1627 if (uNewEip > pCtx->csHid.u32Limit)1628 return iemRaiseGeneralProtectionFault0(pIemCpu);1629 pCtx->rip = uNewEip;1630 }1631 else1632 {1633 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);1634 1635 uint64_t uNewRip = pCtx->rip + offNextInstr + pIemCpu->offOpcode;1636 if (!IEM_IS_CANONICAL(uNewRip))1637 return iemRaiseGeneralProtectionFault0(pIemCpu);1638 pCtx->rip = uNewRip;1639 }1640 return VINF_SUCCESS;1641 }1642 1643 1644 /**1645 * Performs a near jump to the specified address.1646 *1647 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code1648 * segment limit.1649 *1650 * @param pIemCpu The per CPU data.1651 * @param uNewRip The new RIP value.1652 */1653 static VBOXSTRICTRC iemRegRipJump(PIEMCPU pIemCpu, uint64_t uNewRip)1654 {1655 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1656 switch (pIemCpu->enmEffOpSize)1657 {1658 case IEMMODE_16BIT:1659 {1660 Assert(uNewRip <= UINT16_MAX);1661 if ( uNewRip > pCtx->csHid.u32Limit1662 && pIemCpu->enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */1663 return iemRaiseGeneralProtectionFault0(pIemCpu);1664 /** @todo Test 16-bit jump in 64-bit mode. 
*/1665 pCtx->rip = uNewRip;1666 break;1667 }1668 1669 case IEMMODE_32BIT:1670 {1671 Assert(uNewRip <= UINT32_MAX);1672 Assert(pCtx->rip <= UINT32_MAX);1673 Assert(pIemCpu->enmCpuMode != IEMMODE_64BIT);1674 1675 if (uNewRip > pCtx->csHid.u32Limit)1676 return iemRaiseGeneralProtectionFault0(pIemCpu);1677 pCtx->rip = uNewRip;1678 break;1679 }1680 1681 case IEMMODE_64BIT:1682 {1683 Assert(pIemCpu->enmCpuMode == IEMMODE_64BIT);1684 1685 if (!IEM_IS_CANONICAL(uNewRip))1686 return iemRaiseGeneralProtectionFault0(pIemCpu);1687 pCtx->rip = uNewRip;1688 break;1689 }1690 1691 IEM_NOT_REACHED_DEFAULT_CASE_RET();1692 }1693 1694 return VINF_SUCCESS;1695 }1696 1697 1698 /**1699 * Gets the address of the top of the stack.1700 *1701 * @param pCtx The CPU context from which SP/ESP/RSP should be1702 * read.1703 */1704 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCCPUMCTX pCtx)1705 {1706 if (pCtx->ssHid.Attr.n.u1Long)1707 return pCtx->rsp;1708 if (pCtx->ssHid.Attr.n.u1DefBig)1709 return pCtx->esp;1710 return pCtx->sp;1711 }1712 1713 1714 /**1715 * Updates the RIP/EIP/IP to point to the next instruction.1716 *1717 * @param pIemCpu The per CPU data.1718 * @param cbInstr The number of bytes to add.1719 */1720 static void iemRegAddToRip(PIEMCPU pIemCpu, uint8_t cbInstr)1721 {1722 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);1723 switch (pIemCpu->enmCpuMode)1724 {1725 case IEMMODE_16BIT:1726 Assert(pCtx->rip <= UINT16_MAX);1727 pCtx->eip += cbInstr;1728 pCtx->eip &= UINT32_C(0xffff);1729 break;1730 1731 case IEMMODE_32BIT:1732 pCtx->eip += cbInstr;1733 Assert(pCtx->rip <= UINT32_MAX);1734 break;1735 1736 case IEMMODE_64BIT:1737 pCtx->rip += cbInstr;1738 break;1739 default: AssertFailed();1740 }1741 }1742 1743 1744 /**1745 * Updates the RIP/EIP/IP to point to the next instruction.1746 *1747 * @param pIemCpu The per CPU data.1748 */1749 static void iemRegUpdateRip(PIEMCPU pIemCpu)1750 {1751 iemRegAddToRip(pIemCpu, pIemCpu->offOpcode);1752 }1753 1754 1755 /**1756 * Adds to the stack pointer.1757 *1758 * @param pCtx The CPU context whose SP/ESP/RSP should be1759 * updated.1760 * @param cbToAdd The number of bytes to add.1761 */1762 DECLINLINE(void) iemRegAddToRsp(PCPUMCTX pCtx, uint8_t cbToAdd)1763 {1764 if (pCtx->ssHid.Attr.n.u1Long)1765 pCtx->rsp += cbToAdd;1766 else if (pCtx->ssHid.Attr.n.u1DefBig)1767 pCtx->esp += cbToAdd;1768 else1769 pCtx->sp += cbToAdd;1770 }1771 1772 1773 /**1774 * Subtracts from the stack pointer.1775 *1776 * @param pCtx The CPU context whose SP/ESP/RSP should be1777 * updated.1778 * @param cbToSub The number of bytes to subtract.1779 */1780 DECLINLINE(void) iemRegSubFromRsp(PCPUMCTX pCtx, uint8_t cbToSub)1781 {1782 if (pCtx->ssHid.Attr.n.u1Long)1783 pCtx->rsp -= cbToSub;1784 else if (pCtx->ssHid.Attr.n.u1DefBig)1785 pCtx->esp -= cbToSub;1786 else1787 pCtx->sp -= cbToSub;1788 }1789 1790 1791 /**1792 * Adds to the temporary stack pointer.1793 *1794 * @param pTmpRsp The temporary SP/ESP/RSP to update.1795 * @param cbToAdd The number of bytes to add.1796 * @param pCtx Where to get the current stack mode.1797 */1798 DECLINLINE(void) iemRegAddToRspEx(PRTUINT64U pTmpRsp, uint8_t cbToAdd, PCCPUMCTX pCtx)1799 {1800 if (pCtx->ssHid.Attr.n.u1Long)1801 pTmpRsp->u += cbToAdd;1802 else if (pCtx->ssHid.Attr.n.u1DefBig)1803 pTmpRsp->DWords.dw0 += cbToAdd;1804 else1805 pTmpRsp->Words.w0 += cbToAdd;1806 }1807 1808 1809 /**1810 * Subtracts from the temporary stack pointer.1811 *1812 * @param pTmpRsp The temporary SP/ESP/RSP to update.1813 * @param cbToSub The number of bytes to subtract.1814 * @param pCtx Where to get the 
current stack mode.1815 */1816 DECLINLINE(void) iemRegSubFromRspEx(PRTUINT64U pTmpRsp, uint8_t cbToSub, PCCPUMCTX pCtx)1817 {1818 if (pCtx->ssHid.Attr.n.u1Long)1819 pTmpRsp->u -= cbToSub;1820 else if (pCtx->ssHid.Attr.n.u1DefBig)1821 pTmpRsp->DWords.dw0 -= cbToSub;1822 else1823 pTmpRsp->Words.w0 -= cbToSub;1824 }1825 1826 1827 /**1828 * Calculates the effective stack address for a push of the specified size as1829 * well as the new RSP value (upper bits may be masked).1830 *1831 * @returns Effective stack address for the push.1832 * @param pCtx Where to get the current stack mode.1833 * @param cbItem The size of the stack item to push.1834 * @param puNewRsp Where to return the new RSP value.1835 */1836 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)1837 {1838 RTUINT64U uTmpRsp;1839 RTGCPTR GCPtrTop;1840 uTmpRsp.u = pCtx->rsp;1841 1842 if (pCtx->ssHid.Attr.n.u1Long)1843 GCPtrTop = uTmpRsp.u -= cbItem;1844 else if (pCtx->ssHid.Attr.n.u1DefBig)1845 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;1846 else1847 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;1848 *puNewRsp = uTmpRsp.u;1849 return GCPtrTop;1850 }1851 1852 1853 /**1854 * Gets the current stack pointer and calculates the value after a pop of the1855 * specified size.1856 *1857 * @returns Current stack pointer.1858 * @param pCtx Where to get the current stack mode.1859 * @param cbItem The size of the stack item to pop.1860 * @param puNewRsp Where to return the new RSP value.1861 */1862 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)1863 {1864 RTUINT64U uTmpRsp;1865 RTGCPTR GCPtrTop;1866 uTmpRsp.u = pCtx->rsp;1867 1868 if (pCtx->ssHid.Attr.n.u1Long)1869 {1870 GCPtrTop = uTmpRsp.u;1871 uTmpRsp.u += cbItem;1872 }1873 else if (pCtx->ssHid.Attr.n.u1DefBig)1874 {1875 GCPtrTop = uTmpRsp.DWords.dw0;1876 uTmpRsp.DWords.dw0 += cbItem;1877 }1878 else1879 {1880 GCPtrTop = uTmpRsp.Words.w0;1881 uTmpRsp.Words.w0 += cbItem;1882 }1883 *puNewRsp = uTmpRsp.u;1884 return GCPtrTop;1885 }1886 1887 1888 /**1889 * Calculates the effective stack address for a push of the specified size as1890 * well as the new temporary RSP value (upper bits may be masked).1891 *1892 * @returns Effective stack address for the push.1893 * @param pTmpRsp The temporary stack pointer. This is updated.1894 * @param cbItem The size of the stack item to push.1895 * @param pCtx Where to get the current stack mode.1896 */1897 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)1898 {1899 RTGCPTR GCPtrTop;1900 1901 if (pCtx->ssHid.Attr.n.u1Long)1902 GCPtrTop = pTmpRsp->u -= cbItem;1903 else if (pCtx->ssHid.Attr.n.u1DefBig)1904 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;1905 else1906 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;1907 return GCPtrTop;1908 }1909 1910 1911 /**1912 * Gets the effective stack address for a pop of the specified size and1913 * calculates and updates the temporary RSP.1914 *1915 * @returns Current stack pointer.1916 * @param pTmpRsp The temporary stack pointer. 
This is updated.1917 * @param pCtx Where to get the current stack mode.1918 * @param cbItem The size of the stack item to pop.1919 */1920 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PRTUINT64U pTmpRsp, uint8_t cbItem, PCCPUMCTX pCtx)1921 {1922 RTGCPTR GCPtrTop;1923 if (pCtx->ssHid.Attr.n.u1Long)1924 {1925 GCPtrTop = pTmpRsp->u;1926 pTmpRsp->u += cbItem;1927 }1928 else if (pCtx->ssHid.Attr.n.u1DefBig)1929 {1930 GCPtrTop = pTmpRsp->DWords.dw0;1931 pTmpRsp->DWords.dw0 += cbItem;1932 }1933 else1934 {1935 GCPtrTop = pTmpRsp->Words.w0;1936 pTmpRsp->Words.w0 += cbItem;1937 }1938 return GCPtrTop;1939 }1940 1941 1942 /**1943 * Checks if an AMD CPUID feature bit is set.1944 *1945 * @returns true / false.1946 *1947 * @param pIemCpu The IEM per CPU data.1948 * @param fEdx The EDX bit to test, or 0 when testing ECX.1949 * @param fEcx The ECX bit to test, or 0 when testing EDX.1950 * @remarks Used via IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX.1951 */1952 static bool iemRegIsAmdCpuIdFeaturePresent(PIEMCPU pIemCpu, uint32_t fEdx, uint32_t fEcx)1953 {1954 uint32_t uEax, uEbx, uEcx, uEdx;1955 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), 0x80000001, &uEax, &uEbx, &uEcx, &uEdx);1956 return (fEcx && (uEcx & fEcx))1957 || (fEdx && (uEdx & fEdx));1958 }1959 1960 /** @} */1961 1962 1963 /** @name Memory access.1964 *1965 * @{1966 */1967 1968 1969 /**1970 * Checks if the given segment can be written to, raising the appropriate1971 * exception if not.1972 *1973 * @returns VBox strict status code.1974 *1975 * @param pIemCpu The IEM per CPU data.1976 * @param pHid Pointer to the hidden register.1977 * @param iSegReg The register number.1978 */1979 static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)1980 {1981 if (!pHid->Attr.n.u1Present)1982 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);1983 1984 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)1985 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )1986 && pIemCpu->enmCpuMode != IEMMODE_64BIT )1987 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_W);1988 1989 /** @todo DPL/RPL/CPL? */1990 1991 return VINF_SUCCESS;1992 }1993 1994 1995 /**1996 * Checks if the given segment can be read from, raising the appropriate1997 * exception if not.1998 *1999 * @returns VBox strict status code.2000 *2001 * @param pIemCpu The IEM per CPU data.2002 * @param pHid Pointer to the hidden register.2003 * @param iSegReg The register number.2004 */2005 static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)2006 {2007 if (!pHid->Attr.n.u1Present)2008 return iemRaiseSelectorNotPresentBySegReg(pIemCpu, iSegReg);2009 2010 if ( (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE2011 && pIemCpu->enmCpuMode != IEMMODE_64BIT )2012 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, IEM_ACCESS_DATA_R);2013 2014 /** @todo DPL/RPL/CPL? */2015 2016 return VINF_SUCCESS;2017 }2018 2019 2020 /**2021 * Applies the segment limit, base and attributes.2022 *2023 * This may raise a \#GP or \#SS.2024 *2025 * @returns VBox strict status code.2026 *2027 * @param pIemCpu The IEM per CPU data.2028 * @param fAccess The kind of access which is being performed.2029 * @param iSegReg The index of the segment register to apply.2030 * This is UINT8_MAX if none (for IDT, GDT, LDT,2031 * TSS, ++).2032 * @param cbMem The number of bytes being accessed. * @param pGCPtrMem Pointer to the guest memory address to apply2033 * segmentation to. 
Input and output parameter.2034 */2035 static VBOXSTRICTRC iemMemApplySegment(PIEMCPU pIemCpu, uint32_t fAccess, uint8_t iSegReg,2036 size_t cbMem, PRTGCPTR pGCPtrMem)2037 {2038 if (iSegReg == UINT8_MAX)2039 return VINF_SUCCESS;2040 2041 PCPUMSELREGHID pSel = iemSRegGetHid(pIemCpu, iSegReg);2042 switch (pIemCpu->enmCpuMode)2043 {2044 case IEMMODE_16BIT:2045 case IEMMODE_32BIT:2046 {2047 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;2048 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;2049 2050 Assert(pSel->Attr.n.u1Present);2051 Assert(pSel->Attr.n.u1DescType);2052 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))2053 {2054 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)2055 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )2056 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);2057 2058 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))2059 {2060 /** @todo CPL check. */2061 }2062 2063 /*2064 * There are two kinds of data selectors, normal and expand down.2065 */2066 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))2067 {2068 if ( GCPtrFirst32 > pSel->u32Limit2069 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */2070 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);2071 2072 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;2073 }2074 else2075 {2076 /** @todo implement expand down segments. */2077 AssertFailed(/** @todo implement this */);2078 return VERR_NOT_IMPLEMENTED;2079 }2080 }2081 else2082 {2083 2084 /*2085 * Code selectors can usually be read through; writing is2086 * only permitted in real and V8086 mode.2087 */2088 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)2089 || ( (fAccess & IEM_ACCESS_TYPE_READ)2090 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )2091 && !IEM_IS_REAL_OR_V86_MODE(pIemCpu) )2092 return iemRaiseSelectorInvalidAccess(pIemCpu, iSegReg, fAccess);2093 2094 if ( GCPtrFirst32 > pSel->u32Limit2095 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */2096 return iemRaiseSelectorBounds(pIemCpu, iSegReg, fAccess);2097 2098 if (!IEM_IS_REAL_OR_V86_MODE(pIemCpu))2099 {2100 /** @todo CPL check. */2101 }2102 2103 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;2104 }2105 return VINF_SUCCESS;2106 }2107 2108 case IEMMODE_64BIT:2109 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)2110 *pGCPtrMem += pSel->u64Base;2111 return VINF_SUCCESS;2112 2113 default:2114 AssertFailedReturn(VERR_INTERNAL_ERROR_5);2115 }2116 }2117 2118 2119 /**2120 * Translates a virtual address to a physical address and checks if we2121 * can access the page as specified.2122 *2123 * @param pIemCpu The IEM per CPU data.2124 * @param GCPtrMem The virtual address.2125 * @param fAccess The intended access.2126 * @param pGCPhysMem Where to return the physical address.2127 */2128 static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,2129 PRTGCPHYS pGCPhysMem)2130 {2131 /** @todo Need a different PGM interface here. We're currently using2132 * generic / REM interfaces. This won't cut it for R0 & RC. */2133 RTGCPHYS GCPhys;2134 uint64_t fFlags;2135 int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrMem, &fFlags, &GCPhys);2136 if (RT_FAILURE(rc))2137 {2138 /** @todo Check unassigned memory in unpaged mode. */2139 *pGCPhysMem = NIL_RTGCPHYS;2140 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, rc);2141 }2142 2143 if ( (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US)2144 && ( ( (fAccess & IEM_ACCESS_TYPE_WRITE) /* Write to read-only memory? 
*/2145 && !(fFlags & X86_PTE_RW)2146 && ( pIemCpu->uCpl != 02147 || (pIemCpu->CTX_SUFF(pCtx)->cr0 & X86_CR0_WP)) )2148 || ( !(fFlags & X86_PTE_US) /* Kernel memory */2149 && pIemCpu->uCpl == 3)2150 || ( (fAccess & IEM_ACCESS_TYPE_EXEC) /* Executing non-executable memory? */2151 && (fFlags & X86_PTE_PAE_NX)2152 && (pIemCpu->CTX_SUFF(pCtx)->msrEFER & MSR_K6_EFER_NXE) )2153 )2154 )2155 {2156 *pGCPhysMem = NIL_RTGCPHYS;2157 return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);2158 }2159 2160 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;2161 *pGCPhysMem = GCPhys;2162 return VINF_SUCCESS;2163 }2164 2165 2166 2167 /**2168 * Maps a physical page.2169 *2170 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).2171 * @param pIemCpu The IEM per CPU data.2172 * @param GCPhysMem The physical address.2173 * @param fAccess The intended access.2174 * @param ppvMem Where to return the mapping address.2175 */2176 static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem)2177 {2178 #ifdef IEM_VERIFICATION_MODE2179 /* Force the alternative path so we can ignore writes. */2180 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pIemCpu->fNoRem)2181 return VERR_PGM_PHYS_TLB_CATCH_ALL;2182 #endif2183 2184 /*2185 * If we can map the page without trouble, we can process in one block2186 * until the end of the current page.2187 */2188 /** @todo need some better API. */2189 return PGMR3PhysTlbGCPhys2Ptr(IEMCPU_TO_VM(pIemCpu),2190 GCPhysMem,2191 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),2192 ppvMem);2193 }2194 2195 2196 /**2197 * Looks up a memory mapping entry.2198 *2199 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).2200 * @param pIemCpu The IEM per CPU data.2201 * @param pvMem The memory address.2202 * @param fAccess The kind of access.2203 */2204 DECLINLINE(int) iemMapLookup(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)2205 {2206 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;2207 if ( pIemCpu->aMemMappings[0].pv == pvMem2208 && (pIemCpu->aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)2209 return 0;2210 if ( pIemCpu->aMemMappings[1].pv == pvMem2211 && (pIemCpu->aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)2212 return 1;2213 if ( pIemCpu->aMemMappings[2].pv == pvMem2214 && (pIemCpu->aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)2215 return 2;2216 return VERR_NOT_FOUND;2217 }2218 2219 2220 /**2221 * Finds a free memmap entry when using iNextMapping doesn't work.2222 *2223 * @returns Memory mapping index, 1024 on failure.2224 * @param pIemCpu The IEM per CPU data.2225 */2226 static unsigned iemMemMapFindFree(PIEMCPU pIemCpu)2227 {2228 /*2229 * The easy case.2230 */2231 if (pIemCpu->cActiveMappings == 0)2232 {2233 pIemCpu->iNextMapping = 1;2234 return 0;2235 }2236 2237 /* There should be enough mappings for all instructions. */2238 AssertReturn(pIemCpu->cActiveMappings < RT_ELEMENTS(pIemCpu->aMemMappings), 1024);2239 2240 AssertFailed(); /** @todo implement me. 
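 * (A minimal sketch of what the missing scan could look like -- an assumption
 * for illustration only, not code from this changeset:
 * @code
 *     for (unsigned i = 0; i < RT_ELEMENTS(pIemCpu->aMemMappings); i++)
 *         if (pIemCpu->aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
 *         {
 *             pIemCpu->iNextMapping = i + 1;
 *             return i;
 *         }
 * @endcode )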
*/2241 return 1024;2242 2243 }2244 2245 2246 /**2247 * Commits a bounce buffer that needs writing back and unmaps it.2248 *2249 * @returns Strict VBox status code.2250 * @param pIemCpu The IEM per CPU data.2251 * @param iMemMap The index of the buffer to commit.2252 */2253 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PIEMCPU pIemCpu, unsigned iMemMap)2254 {2255 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);2256 Assert(pIemCpu->aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);2257 2258 /*2259 * Do the writing.2260 */2261 int rc;2262 if (!pIemCpu->aMemBbMappings[iMemMap].fUnassigned && IEM_VERIFICATION_ENABLED(pIemCpu))2263 {2264 uint16_t const cbFirst = pIemCpu->aMemBbMappings[iMemMap].cbFirst;2265 uint16_t const cbSecond = pIemCpu->aMemBbMappings[iMemMap].cbSecond;2266 uint8_t const *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];2267 if (!pIemCpu->fByPassHandlers)2268 {2269 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),2270 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,2271 pbBuf,2272 cbFirst);2273 if (cbSecond && rc == VINF_SUCCESS)2274 rc = PGMPhysWrite(IEMCPU_TO_VM(pIemCpu),2275 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,2276 pbBuf + cbFirst,2277 cbSecond);2278 }2279 else2280 {2281 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),2282 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst,2283 pbBuf,2284 cbFirst);2285 if (cbSecond && rc == VINF_SUCCESS)2286 rc = PGMPhysSimpleWriteGCPhys(IEMCPU_TO_VM(pIemCpu),2287 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond,2288 pbBuf + cbFirst,2289 cbSecond);2290 }2291 }2292 else2293 rc = VINF_SUCCESS;2294 2295 #ifdef IEM_VERIFICATION_MODE2296 /*2297 * Record the write(s).2298 */2299 if (!pIemCpu->fNoRem)2300 {2301 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);2302 if (pEvtRec)2303 {2304 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;2305 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst;2306 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbFirst;2307 memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);2308 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;2309 *pIemCpu->ppIemEvtRecNext = pEvtRec;2310 }2311 if (pIemCpu->aMemBbMappings[iMemMap].cbSecond)2312 {2313 pEvtRec = iemVerifyAllocRecord(pIemCpu);2314 if (pEvtRec)2315 {2316 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;2317 pEvtRec->u.RamWrite.GCPhys = pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond;2318 pEvtRec->u.RamWrite.cb = pIemCpu->aMemBbMappings[iMemMap].cbSecond;2319 memcpy(pEvtRec->u.RamWrite.ab,2320 &pIemCpu->aBounceBuffers[iMemMap].ab[pIemCpu->aMemBbMappings[iMemMap].cbFirst],2321 pIemCpu->aMemBbMappings[iMemMap].cbSecond);2322 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;2323 *pIemCpu->ppIemEvtRecNext = pEvtRec;2324 }2325 }2326 }2327 #endif2328 2329 /*2330 * Free the mapping entry.2331 */2332 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;2333 Assert(pIemCpu->cActiveMappings != 0);2334 pIemCpu->cActiveMappings--;2335 return rc;2336 }2337 2338 2339 /**2340 * iemMemMap worker that deals with a request crossing pages.2341 */2342 static VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PIEMCPU pIemCpu, int iMemMap, void **ppvMem,2343 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)2344 {2345 /*2346 * Do the address translations.2347 */2348 RTGCPHYS GCPhysFirst;2349 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);2350 if (rcStrict != VINF_SUCCESS)2351 return rcStrict;2352 2353 RTGCPHYS 
GCPhysSecond;2354 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);2355 if (rcStrict != VINF_SUCCESS)2356 return rcStrict;2357 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;2358 2359 /*2360 * Read in the current memory content if it's a read or execute access.2361 */2362 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];2363 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);2364 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);2365 2366 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))2367 {2368 int rc;2369 if (!pIemCpu->fByPassHandlers)2370 {2371 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbFirstPage);2372 if (rc != VINF_SUCCESS)2373 return rc;2374 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage);2375 if (rc != VINF_SUCCESS)2376 return rc;2377 }2378 else2379 {2380 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbFirstPage);2381 if (rc != VINF_SUCCESS)2382 return rc;2383 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);2384 if (rc != VINF_SUCCESS)2385 return rc;2386 }2387 2388 #ifdef IEM_VERIFICATION_MODE2389 if (!pIemCpu->fNoRem)2390 {2391 /*2392 * Record the reads.2393 */2394 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);2395 if (pEvtRec)2396 {2397 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;2398 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;2399 pEvtRec->u.RamRead.cb = cbFirstPage;2400 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;2401 *pIemCpu->ppIemEvtRecNext = pEvtRec;2402 }2403 pEvtRec = iemVerifyAllocRecord(pIemCpu);2404 if (pEvtRec)2405 {2406 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;2407 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;2408 pEvtRec->u.RamRead.cb = cbSecondPage;2409 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;2410 *pIemCpu->ppIemEvtRecNext = pEvtRec;2411 }2412 }2413 #endif2414 }2415 #ifdef VBOX_STRICT2416 else2417 memset(pbBuf, 0xcc, cbMem);2418 #endif2419 #ifdef VBOX_STRICT2420 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))2421 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);2422 #endif2423 2424 /*2425 * Commit the bounce buffer entry.2426 */2427 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;2428 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;2429 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;2430 pIemCpu->aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;2431 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = false;2432 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;2433 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;2434 pIemCpu->cActiveMappings++;2435 2436 *ppvMem = pbBuf;2437 return VINF_SUCCESS;2438 }2439 2440 2441 /**2442 * iemMemMap worker that deals with iemMemPageMap failures.2443 */2444 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,2445 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)2446 {2447 /*2448 * Filter out conditions we can handle and the ones which shouldn't happen.2449 */2450 if ( rcMap != VINF_PGM_PHYS_TLB_CATCH_WRITE2451 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL2452 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)2453 {2454 AssertReturn(RT_FAILURE_NP(rcMap), VERR_INTERNAL_ERROR_3);2455 return rcMap;2456 }2457 pIemCpu->cPotentialExits++;2458 2459 /*2460 * Read in the current memory content if it's a read or execute access.2461 
*/2462 uint8_t *pbBuf = &pIemCpu->aBounceBuffers[iMemMap].ab[0];2463 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC))2464 {2465 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)2466 memset(pbBuf, 0xff, cbMem);2467 else2468 {2469 int rc;2470 if (!pIemCpu->fByPassHandlers)2471 rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhysFirst, pbBuf, cbMem);2472 else2473 rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf, GCPhysFirst, cbMem);2474 if (rc != VINF_SUCCESS)2475 return rc;2476 }2477 2478 #ifdef IEM_VERIFICATION_MODE2479 if (!pIemCpu->fNoRem)2480 {2481 /*2482 * Record the read.2483 */2484 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);2485 if (pEvtRec)2486 {2487 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;2488 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;2489 pEvtRec->u.RamRead.cb = cbMem;2490 pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;2491 *pIemCpu->ppIemEvtRecNext = pEvtRec;2492 }2493 }2494 #endif2495 }2496 #ifdef VBOX_STRICT2497 else2498 memset(pbBuf, 0xcc, cbMem);2499 #endif2500 #ifdef VBOX_STRICT2501 if (cbMem < sizeof(pIemCpu->aBounceBuffers[iMemMap].ab))2502 memset(pbBuf + cbMem, 0xaa, sizeof(pIemCpu->aBounceBuffers[iMemMap].ab) - cbMem);2503 #endif2504 2505 /*2506 * Commit the bounce buffer entry.2507 */2508 pIemCpu->aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;2509 pIemCpu->aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;2510 pIemCpu->aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;2511 pIemCpu->aMemBbMappings[iMemMap].cbSecond = 0;2512 pIemCpu->aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;2513 pIemCpu->aMemMappings[iMemMap].pv = pbBuf;2514 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;2515 pIemCpu->cActiveMappings++;2516 2517 *ppvMem = pbBuf;2518 return VINF_SUCCESS;2519 }2520 2521 2522 2523 /**2524 * Maps the specified guest memory for the given kind of access.2525 *2526 * This may be using bounce buffering of the memory if it's crossing a page2527 * boundary or if there is an access handler installed for any of it. Because2528 * of lock prefix guarantees, we're in for some extra clutter when this2529 * happens.2530 *2531 * This may raise a \#GP, \#SS, \#PF or \#AC.2532 *2533 * @returns VBox strict status code.2534 *2535 * @param pIemCpu The IEM per CPU data.2536 * @param ppvMem Where to return the pointer to the mapped2537 * memory.2538 * @param cbMem The number of bytes to map. This is usually 1,2539 * 2, 4, 6, 8, 12, 16 or 32. When used by string2540 * operations it can be up to a page.2541 * @param iSegReg The index of the segment register to use for2542 * this access. The base and limits are checked.2543 * Use UINT8_MAX to indicate that no segmentation2544 * is required (for IDT, GDT and LDT accesses).2545 * @param GCPtrMem The address of the guest memory.2546 * @param fAccess How the memory is being accessed. 
The2547 * IEM_ACCESS_TYPE_XXX bit is used to figure out2548 * how to map the memory, while the2549 * IEM_ACCESS_WHAT_XXX bit is used when raising2550 * exceptions.2551 */2552 static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)2553 {2554 /*2555 * Check the input and figure out which mapping entry to use.2556 */2557 Assert(cbMem <= 32);2558 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK))); /* no stray access bits */2559 2560 unsigned iMemMap = pIemCpu->iNextMapping;2561 if (iMemMap >= RT_ELEMENTS(pIemCpu->aMemMappings))2562 {2563 iMemMap = iemMemMapFindFree(pIemCpu);2564 AssertReturn(iMemMap < RT_ELEMENTS(pIemCpu->aMemMappings), VERR_INTERNAL_ERROR_3);2565 }2566 2567 /*2568 * Map the memory, checking that we can actually access it. If something2569 * slightly complicated happens, fall back on bounce buffering.2570 */2571 VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, fAccess, iSegReg, cbMem, &GCPtrMem);2572 if (rcStrict != VINF_SUCCESS)2573 return rcStrict;2574 2575 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */2576 return iemMemBounceBufferMapCrossPage(pIemCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);2577 2578 RTGCPHYS GCPhysFirst;2579 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, fAccess, &GCPhysFirst);2580 if (rcStrict != VINF_SUCCESS)2581 return rcStrict;2582 2583 void *pvMem;2584 rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem);2585 if (rcStrict != VINF_SUCCESS)2586 return iemMemBounceBufferMapPhys(pIemCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);2587 2588 /*2589 * Fill in the mapping table entry.2590 */2591 pIemCpu->aMemMappings[iMemMap].pv = pvMem;2592 pIemCpu->aMemMappings[iMemMap].fAccess = fAccess;2593 pIemCpu->iNextMapping = iMemMap + 1;2594 pIemCpu->cActiveMappings++;2595 2596 *ppvMem = pvMem;2597 return VINF_SUCCESS;2598 }2599 2600 2601 /**2602 * Commits the guest memory if bounce buffered and unmaps it.2603 *2604 * @returns Strict VBox status code.2605 * @param pIemCpu The IEM per CPU data.2606 * @param pvMem The mapping.2607 * @param fAccess The kind of access.2608 */2609 static VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess)2610 {2611 int iMemMap = iemMapLookup(pIemCpu, pvMem, fAccess);2612 AssertReturn(iMemMap >= 0, iMemMap);2613 2614 /*2615 * If it's bounce buffered, we need to write back the buffer.2616 */2617 if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))2618 == (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))2619 return iemMemBounceBufferCommitAndUnmap(pIemCpu, iMemMap);2620 2621 /* Free the entry. */2622 pIemCpu->aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;2623 Assert(pIemCpu->cActiveMappings != 0);2624 pIemCpu->cActiveMappings--;2625 return VINF_SUCCESS;2626 }2627 2628 2629 /**2630 * Fetches a data byte.2631 *2632 * @returns Strict VBox status code.2633 * @param pIemCpu The IEM per CPU data.2634 * @param pu8Dst Where to return the byte.2635 * @param iSegReg The index of the segment register to use for2636 * this access. The base and limits are checked.2637 * @param GCPtrMem The address of the guest memory.2638 */2639 static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)2640 {2641 /* The lazy approach for now... 
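       The same map / copy / commit-and-unmap pattern repeats for every fetch
       and store size below.  A hedged caller-side sketch, assuming a valid
       pIemCpu and a DS-relative GCPtrMem (illustrative, not from this
       changeset):

           uint8_t bValue;
           VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pIemCpu, &bValue, X86_SREG_DS, GCPtrMem);
           if (rcStrict2 != VINF_SUCCESS)
               return rcStrict2;   (may be a raised #GP, #SS or #PF)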
*/2642 uint8_t const *pu8Src;2643 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);2644 if (rc == VINF_SUCCESS)2645 {2646 *pu8Dst = *pu8Src;2647 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);2648 }2649 return rc;2650 }2651 2652 2653 /**2654 * Fetches a data word.2655 *2656 * @returns Strict VBox status code.2657 * @param pIemCpu The IEM per CPU data.2658 * @param pu16Dst Where to return the word.2659 * @param iSegReg The index of the segment register to use for2660 * this access. The base and limits are checked.2661 * @param GCPtrMem The address of the guest memory.2662 */2663 static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)2664 {2665 /* The lazy approach for now... */2666 uint16_t const *pu16Src;2667 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);2668 if (rc == VINF_SUCCESS)2669 {2670 *pu16Dst = *pu16Src;2671 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);2672 }2673 return rc;2674 }2675 2676 2677 /**2678 * Fetches a data dword.2679 *2680 * @returns Strict VBox status code.2681 * @param pIemCpu The IEM per CPU data.2682 * @param pu32Dst Where to return the dword.2683 * @param iSegReg The index of the segment register to use for2684 * this access. The base and limits are checked.2685 * @param GCPtrMem The address of the guest memory.2686 */2687 static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)2688 {2689 /* The lazy approach for now... */2690 uint32_t const *pu32Src;2691 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);2692 if (rc == VINF_SUCCESS)2693 {2694 *pu32Dst = *pu32Src;2695 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);2696 }2697 return rc;2698 }2699 2700 2701 /**2702 * Fetches a data dword and sign extends it to a qword.2703 *2704 * @returns Strict VBox status code.2705 * @param pIemCpu The IEM per CPU data.2706 * @param pu64Dst Where to return the sign extended value.2707 * @param iSegReg The index of the segment register to use for2708 * this access. The base and limits are checked.2709 * @param GCPtrMem The address of the guest memory.2710 */2711 static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)2712 {2713 /* The lazy approach for now... */2714 int32_t const *pi32Src;2715 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);2716 if (rc == VINF_SUCCESS)2717 {2718 *pu64Dst = *pi32Src;2719 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);2720 }2721 #ifdef __GNUC__ /* warning: GCC may be a royal pain */2722 else2723 *pu64Dst = 0;2724 #endif2725 return rc;2726 }2727 2728 2729 /**2730 * Fetches a data qword.2731 *2732 * @returns Strict VBox status code.2733 * @param pIemCpu The IEM per CPU data.2734 * @param pu64Dst Where to return the qword.2735 * @param iSegReg The index of the segment register to use for2736 * this access. The base and limits are checked.2737 * @param GCPtrMem The address of the guest memory.2738 */2739 static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)2740 {2741 /* The lazy approach for now... 
*/2742 uint64_t const *pu64Src;2743 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);2744 if (rc == VINF_SUCCESS)2745 {2746 *pu64Dst = *pu64Src;2747 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);2748 }2749 return rc;2750 }2751 2752 2753 /**2754 * Fetches a descriptor register (lgdt, lidt).2755 *2756 * @returns Strict VBox status code.2757 * @param pIemCpu The IEM per CPU data.2758 * @param pcbLimit Where to return the limit.2759 * @param pGCPtrBase Where to return the base.2760 * @param iSegReg The index of the segment register to use for2761 * this access. The base and limits are checked.2762 * @param GCPtrMem The address of the guest memory.2763 * @param enmOpSize The effective operand size.2764 */2765 static VBOXSTRICTRC iemMemFetchDataXdtr(PIEMCPU pIemCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase,2766 uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)2767 {2768 uint8_t const *pu8Src;2769 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu,2770 (void **)&pu8Src,2771 enmOpSize == IEMMODE_64BIT2772 ? 2 + 82773 : enmOpSize == IEMMODE_32BIT2774 ? 2 + 42775 : 2 + 3,2776 iSegReg,2777 GCPtrMem,2778 IEM_ACCESS_DATA_R);2779 if (rcStrict == VINF_SUCCESS)2780 {2781 *pcbLimit = RT_MAKE_U16(pu8Src[0], pu8Src[1]);2782 switch (enmOpSize)2783 {2784 case IEMMODE_16BIT:2785 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], 0);2786 break;2787 case IEMMODE_32BIT:2788 *pGCPtrBase = RT_MAKE_U32_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5]);2789 break;2790 case IEMMODE_64BIT:2791 *pGCPtrBase = RT_MAKE_U64_FROM_U8(pu8Src[2], pu8Src[3], pu8Src[4], pu8Src[5],2792 pu8Src[6], pu8Src[7], pu8Src[8], pu8Src[9]);2793 break;2794 2795 IEM_NOT_REACHED_DEFAULT_CASE_RET();2796 }2797 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);2798 }2799 return rcStrict;2800 }2801 2802 2803 2804 /**2805 * Stores a data byte.2806 *2807 * @returns Strict VBox status code.2808 * @param pIemCpu The IEM per CPU data.2809 * @param iSegReg The index of the segment register to use for2810 * this access. The base and limits are checked.2811 * @param GCPtrMem The address of the guest memory.2812 * @param u8Value The value to store.2813 */2814 static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)2815 {2816 /* The lazy approach for now... */2817 uint8_t *pu8Dst;2818 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);2819 if (rc == VINF_SUCCESS)2820 {2821 *pu8Dst = u8Value;2822 rc = iemMemCommitAndUnmap(pIemCpu, pu8Dst, IEM_ACCESS_DATA_W);2823 }2824 return rc;2825 }2826 2827 2828 /**2829 * Stores a data word.2830 *2831 * @returns Strict VBox status code.2832 * @param pIemCpu The IEM per CPU data.2833 * @param iSegReg The index of the segment register to use for2834 * this access. The base and limits are checked.2835 * @param GCPtrMem The address of the guest memory.2836 * @param u16Value The value to store.2837 */2838 static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)2839 {2840 /* The lazy approach for now... 
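       Store side of the same pattern: map for IEM_ACCESS_DATA_W, assign, then
       commit -- the value only reaches guest memory (or the bounce buffer)
       when iemMemCommitAndUnmap succeeds.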
*/2841 uint16_t *pu16Dst;2842 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);2843 if (rc == VINF_SUCCESS)2844 {2845 *pu16Dst = u16Value;2846 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_DATA_W);2847 }2848 return rc;2849 }2850 2851 2852 /**2853 * Stores a data dword.2854 *2855 * @returns Strict VBox status code.2856 * @param pIemCpu The IEM per CPU data.2857 * @param iSegReg The index of the segment register to use for2858 * this access. The base and limits are checked.2859 * @param GCPtrMem The address of the guest memory.2860 * @param u32Value The value to store.2861 */2862 static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)2863 {2864 /* The lazy approach for now... */2865 uint32_t *pu32Dst;2866 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);2867 if (rc == VINF_SUCCESS)2868 {2869 *pu32Dst = u32Value;2870 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_DATA_W);2871 }2872 return rc;2873 }2874 2875 2876 /**2877 * Stores a data qword.2878 *2879 * @returns Strict VBox status code.2880 * @param pIemCpu The IEM per CPU data.2881 * @param iSegReg The index of the segment register to use for2882 * this access. The base and limits are checked.2883 * @param GCPtrMem The address of the guest memory.2884 * @param u64Value The value to store.2885 */2886 static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)2887 {2888 /* The lazy approach for now... */2889 uint64_t *pu64Dst;2890 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);2891 if (rc == VINF_SUCCESS)2892 {2893 *pu64Dst = u64Value;2894 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_DATA_W);2895 }2896 return rc;2897 }2898 2899 2900 /**2901 * Pushes a word onto the stack.2902 *2903 * @returns Strict VBox status code.2904 * @param pIemCpu The IEM per CPU data.2905 * @param u16Value The value to push.2906 */2907 static VBOXSTRICTRC iemMemStackPushU16(PIEMCPU pIemCpu, uint16_t u16Value)2908 {2909 /* Decrement the stack pointer. */2910 uint64_t uNewRsp;2911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);2912 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 2, &uNewRsp);2913 2914 /* Write the word the lazy way. */2915 uint16_t *pu16Dst;2916 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);2917 if (rc == VINF_SUCCESS)2918 {2919 *pu16Dst = u16Value;2920 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);2921 }2922 2923 /* Commit the new RSP value unless an access handler made trouble. */2924 if (rc == VINF_SUCCESS)2925 pCtx->rsp = uNewRsp;2926 2927 return rc;2928 }2929 2930 2931 /**2932 * Pushes a dword onto the stack.2933 *2934 * @returns Strict VBox status code.2935 * @param pIemCpu The IEM per CPU data.2936 * @param u32Value The value to push.2937 */2938 static VBOXSTRICTRC iemMemStackPushU32(PIEMCPU pIemCpu, uint32_t u32Value)2939 {2940 /* Decrement the stack pointer. */2941 uint64_t uNewRsp;2942 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);2943 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 4, &uNewRsp);2944 2945 /* Write the dword the lazy way. 
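       Note the ordering here and below: uNewRsp is only committed after the
       mapped write succeeds, so a faulting push leaves RSP untouched.  Hedged
       example with a flat 32-bit stack and esp=0x1000: the iemRegGetRspForPush
       call above returns GCPtrTop=0x0ffc and uNewRsp=0x0ffc for this 4-byte
       push.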
*/2946 uint32_t *pu32Dst;2947 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);2948 if (rc == VINF_SUCCESS)2949 {2950 *pu32Dst = u32Value;2951 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);2952 }2953 2954 /* Commit the new RSP value unless an access handler made trouble. */2955 if (rc == VINF_SUCCESS)2956 pCtx->rsp = uNewRsp;2957 2958 return rc;2959 }2960 2961 2962 /**2963 * Pushes a qword onto the stack.2964 *2965 * @returns Strict VBox status code.2966 * @param pIemCpu The IEM per CPU data.2967 * @param u64Value The value to push.2968 */2969 static VBOXSTRICTRC iemMemStackPushU64(PIEMCPU pIemCpu, uint64_t u64Value)2970 {2971 /* Decrement the stack pointer. */2972 uint64_t uNewRsp;2973 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);2974 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, 8, &uNewRsp);2975 2976 /* Write the qword the lazy way. */2977 uint64_t *pu64Dst;2978 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);2979 if (rc == VINF_SUCCESS)2980 {2981 *pu64Dst = u64Value;2982 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);2983 }2984 2985 /* Commit the new RSP value unless an access handler made trouble. */2986 if (rc == VINF_SUCCESS)2987 pCtx->rsp = uNewRsp;2988 2989 return rc;2990 }2991 2992 2993 /**2994 * Pops a word from the stack.2995 *2996 * @returns Strict VBox status code.2997 * @param pIemCpu The IEM per CPU data.2998 * @param pu16Value Where to store the popped value.2999 */3000 static VBOXSTRICTRC iemMemStackPopU16(PIEMCPU pIemCpu, uint16_t *pu16Value)3001 {3002 /* Increment the stack pointer. */3003 uint64_t uNewRsp;3004 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3005 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 2, &uNewRsp);3006 3007 /* Read the word the lazy way. */3008 uint16_t const *pu16Src;3009 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3010 if (rc == VINF_SUCCESS)3011 {3012 *pu16Value = *pu16Src;3013 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);3014 3015 /* Commit the new RSP value. */3016 if (rc == VINF_SUCCESS)3017 pCtx->rsp = uNewRsp;3018 }3019 3020 return rc;3021 }3022 3023 3024 /**3025 * Pops a dword from the stack.3026 *3027 * @returns Strict VBox status code.3028 * @param pIemCpu The IEM per CPU data.3029 * @param pu32Value Where to store the popped value.3030 */3031 static VBOXSTRICTRC iemMemStackPopU32(PIEMCPU pIemCpu, uint32_t *pu32Value)3032 {3033 /* Increment the stack pointer. */3034 uint64_t uNewRsp;3035 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3036 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 4, &uNewRsp);3037 3038 /* Read the dword the lazy way. */3039 uint32_t const *pu32Src;3040 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3041 if (rc == VINF_SUCCESS)3042 {3043 *pu32Value = *pu32Src;3044 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);3045 3046 /* Commit the new RSP value. */3047 if (rc == VINF_SUCCESS)3048 pCtx->rsp = uNewRsp;3049 }3050 3051 return rc;3052 }3053 3054 3055 /**3056 * Pops a qword from the stack.3057 *3058 * @returns Strict VBox status code.3059 * @param pIemCpu The IEM per CPU data.3060 * @param pu64Value Where to store the popped value.3061 */3062 static VBOXSTRICTRC iemMemStackPopU64(PIEMCPU pIemCpu, uint64_t *pu64Value)3063 {3064 /* Increment the stack pointer. 
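       For a pop the current top of the stack is read first and RSP only grows
       once the read succeeds.  Hedged 16-bit example: with sp=0xfffe, an
       8-byte pop reads at 0xfffe and the new sp wraps to 0x0006 -- only the
       low word of RSP moves, per the iemRegGetRspForPop logic.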
*/3065 uint64_t uNewRsp;3066 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3067 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, 8, &uNewRsp);3068 3069 /* Read the qword the lazy way. */3070 uint64_t const *pu64Src;3071 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3072 if (rc == VINF_SUCCESS)3073 {3074 *pu64Value = *pu64Src;3075 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);3076 3077 /* Commit the new RSP value. */3078 if (rc == VINF_SUCCESS)3079 pCtx->rsp = uNewRsp;3080 }3081 3082 return rc;3083 }3084 3085 3086 /**3087 * Pushes a word onto the stack, using a temporary stack pointer.3088 *3089 * @returns Strict VBox status code.3090 * @param pIemCpu The IEM per CPU data.3091 * @param u16Value The value to push.3092 * @param pTmpRsp Pointer to the temporary stack pointer.3093 */3094 static VBOXSTRICTRC iemMemStackPushU16Ex(PIEMCPU pIemCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)3095 {3096 /* Decrement the stack pointer. */3097 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3098 RTUINT64U NewRsp = *pTmpRsp;3099 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 2, pCtx);3100 3101 /* Write the word the lazy way. */3102 uint16_t *pu16Dst;3103 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);3104 if (rc == VINF_SUCCESS)3105 {3106 *pu16Dst = u16Value;3107 rc = iemMemCommitAndUnmap(pIemCpu, pu16Dst, IEM_ACCESS_STACK_W);3108 }3109 3110 /* Commit the new RSP value unless an access handler made trouble. */3111 if (rc == VINF_SUCCESS)3112 *pTmpRsp = NewRsp;3113 3114 return rc;3115 }3116 3117 3118 /**3119 * Pushes a dword onto the stack, using a temporary stack pointer.3120 *3121 * @returns Strict VBox status code.3122 * @param pIemCpu The IEM per CPU data.3123 * @param u32Value The value to push.3124 * @param pTmpRsp Pointer to the temporary stack pointer.3125 */3126 static VBOXSTRICTRC iemMemStackPushU32Ex(PIEMCPU pIemCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)3127 {3128 /* Decrement the stack pointer. */3129 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3130 RTUINT64U NewRsp = *pTmpRsp;3131 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 4, pCtx);3132 3133 /* Write the word the lazy way. */3134 uint32_t *pu32Dst;3135 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);3136 if (rc == VINF_SUCCESS)3137 {3138 *pu32Dst = u32Value;3139 rc = iemMemCommitAndUnmap(pIemCpu, pu32Dst, IEM_ACCESS_STACK_W);3140 }3141 3142 /* Commit the new RSP value unless an access handler made trouble. */3143 if (rc == VINF_SUCCESS)3144 *pTmpRsp = NewRsp;3145 3146 return rc;3147 }3148 3149 3150 /**3151 * Pushes a qword onto the stack, using a temporary stack pointer.3152 *3153 * @returns Strict VBox status code.3154 * @param pIemCpu The IEM per CPU data.3155 * @param u64Value The value to push.3156 * @param pTmpRsp Pointer to the temporary stack pointer.3157 */3158 static VBOXSTRICTRC iemMemStackPushU64Ex(PIEMCPU pIemCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)3159 {3160 /* Decrement the stack pointer. */3161 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3162 RTUINT64U NewRsp = *pTmpRsp;3163 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(&NewRsp, 8, pCtx);3164 3165 /* Write the qword the lazy way. 
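       The *Ex variants let a caller thread a single RTUINT64U through several
       pushes and commit pCtx->rsp once at the end.  Hedged usage sketch with
       assumed local values, not code from this changeset:

           RTUINT64U TmpRsp;
           TmpRsp.u = pCtx->rsp;
           VBOXSTRICTRC rcStrict2 = iemMemStackPushU64Ex(pIemCpu, uValue1, &TmpRsp);
           if (rcStrict2 == VINF_SUCCESS)
               rcStrict2 = iemMemStackPushU64Ex(pIemCpu, uValue2, &TmpRsp);
           if (rcStrict2 == VINF_SUCCESS)
               pCtx->rsp = TmpRsp.u;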
*/3166 uint64_t *pu64Dst;3167 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);3168 if (rc == VINF_SUCCESS)3169 {3170 *pu64Dst = u64Value;3171 rc = iemMemCommitAndUnmap(pIemCpu, pu64Dst, IEM_ACCESS_STACK_W);3172 }3173 3174 /* Commit the new RSP value unless an access handler made trouble. */3175 if (rc == VINF_SUCCESS)3176 *pTmpRsp = NewRsp;3177 3178 return rc;3179 }3180 3181 3182 /**3183 * Pops a word from the stack, using a temporary stack pointer.3184 *3185 * @returns Strict VBox status code.3186 * @param pIemCpu The IEM per CPU data.3187 * @param pu16Value Where to store the popped value.3188 * @param pTmpRsp Pointer to the temporary stack pointer.3189 */3190 static VBOXSTRICTRC iemMemStackPopU16Ex(PIEMCPU pIemCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)3191 {3192 /* Increment the stack pointer. */3193 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3194 RTUINT64U NewRsp = *pTmpRsp;3195 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 2, pCtx);3196 3197 /* Read the word the lazy way. */3198 uint16_t const *pu16Src;3199 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3200 if (rc == VINF_SUCCESS)3201 {3202 *pu16Value = *pu16Src;3203 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);3204 3205 /* Commit the new RSP value. */3206 if (rc == VINF_SUCCESS)3207 *pTmpRsp = NewRsp;3208 }3209 3210 return rc;3211 }3212 3213 3214 /**3215 * Pops a dword from the stack, using a temporary stack pointer.3216 *3217 * @returns Strict VBox status code.3218 * @param pIemCpu The IEM per CPU data.3219 * @param pu32Value Where to store the popped value.3220 * @param pTmpRsp Pointer to the temporary stack pointer.3221 */3222 static VBOXSTRICTRC iemMemStackPopU32Ex(PIEMCPU pIemCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)3223 {3224 /* Increment the stack pointer. */3225 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3226 RTUINT64U NewRsp = *pTmpRsp;3227 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 4, pCtx);3228 3229 /* Read the dword the lazy way. */3230 uint32_t const *pu32Src;3231 VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3232 if (rc == VINF_SUCCESS)3233 {3234 *pu32Value = *pu32Src;3235 rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);3236 3237 /* Commit the new RSP value. */3238 if (rc == VINF_SUCCESS)3239 *pTmpRsp = NewRsp;3240 }3241 3242 return rc;3243 }3244 3245 3246 /**3247 * Pops a qword from the stack, using a temporary stack pointer.3248 *3249 * @returns Strict VBox status code.3250 * @param pIemCpu The IEM per CPU data.3251 * @param pu64Value Where to store the popped value.3252 * @param pTmpRsp Pointer to the temporary stack pointer.3253 */3254 static VBOXSTRICTRC iemMemStackPopU64Ex(PIEMCPU pIemCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)3255 {3256 /* Increment the stack pointer. */3257 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3258 RTUINT64U NewRsp = *pTmpRsp;3259 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(&NewRsp, 8, pCtx);3260 3261 /* Read the qword the lazy way. */3262 uint64_t const *pu64Src;3263 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3264 if (rcStrict == VINF_SUCCESS)3265 {3266 *pu64Value = *pu64Src;3267 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);3268 3269 /* Commit the new RSP value. 
*/3270 if (rcStrict == VINF_SUCCESS)3271 *pTmpRsp = NewRsp;3272 }3273 3274 return rcStrict;3275 }3276 3277 3278 /**3279 * Begins a special stack push (used by interrupts, exceptions and such).3280 *3281 * This will raise \#SS or \#PF if appropriate.3282 *3283 * @returns Strict VBox status code.3284 * @param pIemCpu The IEM per CPU data.3285 * @param cbMem The number of bytes to push onto the stack.3286 * @param ppvMem Where to return the pointer to the stack memory.3287 * As with the other memory functions this could be3288 * direct access or bounce buffered access, so3289 * don't commit any register changes until the commit call3290 * succeeds.3291 * @param puNewRsp Where to return the new RSP value. This must be3292 * passed unchanged to3293 * iemMemStackPushCommitSpecial().3294 */3295 static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)3296 {3297 Assert(cbMem < UINT8_MAX);3298 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3299 RTGCPTR GCPtrTop = iemRegGetRspForPush(pCtx, (uint8_t)cbMem, puNewRsp);3300 return iemMemMap(pIemCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);3301 }3302 3303 3304 /**3305 * Commits a special stack push (started by iemMemStackPushBeginSpecial).3306 *3307 * This will update the rSP.3308 *3309 * @returns Strict VBox status code.3310 * @param pIemCpu The IEM per CPU data.3311 * @param pvMem The pointer returned by3312 * iemMemStackPushBeginSpecial().3313 * @param uNewRsp The new RSP value returned by3314 * iemMemStackPushBeginSpecial().3315 */3316 static VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp)3317 {3318 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem, IEM_ACCESS_STACK_W);3319 if (rcStrict == VINF_SUCCESS)3320 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;3321 return rcStrict;3322 }3323 3324 3325 /**3326 * Begins a special stack pop (used by iret, retf and such).3327 *3328 * This will raise \#SS or \#PF if appropriate.3329 *3330 * @returns Strict VBox status code.3331 * @param pIemCpu The IEM per CPU data.3332 * @param cbMem The number of bytes to pop off the stack.3333 * @param ppvMem Where to return the pointer to the stack memory.3334 * @param puNewRsp Where to return the new RSP value. 
This must be3335 * passed unchanged to3336 * iemMemStackPopCommitSpecial().3337 */3338 static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)3339 {3340 Assert(cbMem < UINT8_MAX);3341 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3342 RTGCPTR GCPtrTop = iemRegGetRspForPop(pCtx, (uint8_t)cbMem, puNewRsp);3343 return iemMemMap(pIemCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);3344 }3345 3346 3347 /**3348 * Commits a special stack pop (started by iemMemStackPopBeginSpecial).3349 *3350 * This will update the rSP.3351 *3352 * @returns Strict VBox status code.3353 * @param pIemCpu The IEM per CPU data.3354 * @param pvMem The pointer returned by3355 * iemMemStackPopBeginSpecial().3356 * @param uNewRsp The new RSP value returned by3357 * iemMemStackPopBeginSpecial().3358 */3359 static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)3360 {3361 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pvMem, IEM_ACCESS_STACK_R);3362 if (rcStrict == VINF_SUCCESS)3363 pIemCpu->CTX_SUFF(pCtx)->rsp = uNewRsp;3364 return rcStrict;3365 }3366 3367 3368 /**3369 * Fetches a descriptor table entry.3370 *3371 * @returns Strict VBox status code.3372 * @param pIemCpu The IEM per CPU.3373 * @param pDesc Where to return the descriptor table entry.3374 * @param uSel The selector whose table entry to fetch.3375 */3376 static VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel)3377 {3378 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3379 3380 /** @todo did the 286 require all 8 bytes to be accessible? */3381 /*3382 * Get the selector table base and check bounds.3383 */3384 RTGCPTR GCPtrBase;3385 if (uSel & X86_SEL_LDT)3386 {3387 if ( !pCtx->ldtrHid.Attr.n.u1Present3388 || (uSel | 0x7U) > pCtx->ldtrHid.u32Limit )3389 {3390 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",3391 uSel, pCtx->ldtrHid.u32Limit, pCtx->ldtr));3392 /** @todo is this the right exception? */3393 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));3394 }3395 3396 Assert(pCtx->ldtrHid.Attr.n.u1Present);3397 GCPtrBase = pCtx->ldtrHid.u64Base;3398 }3399 else3400 {3401 if ((uSel | 0x7U) > pCtx->gdtr.cbGdt)3402 {3403 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));3404 /** @todo is this the right exception? */3405 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));3406 }3407 GCPtrBase = pCtx->gdtr.pGdt;3408 }3409 3410 /*3411 * Read the legacy descriptor and maybe the long mode extensions if3412 * required.3413 */3414 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));3415 if (rcStrict == VINF_SUCCESS)3416 {3417 if ( !IEM_IS_LONG_MODE(pIemCpu)3418 || pDesc->Legacy.Gen.u1DescType)3419 pDesc->Long.au64[1] = 0;3420 else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))3421 rcStrict = iemMemFetchDataU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8); /* the high 8 bytes of the 16-byte system descriptor */3422 else3423 {3424 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));3425 /** @todo is this the right exception? 
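 * (Worked example of the bounds check above, with assumed numbers: for a GDT
 * limit (cbGdt) of 0x3f, selector 0x28 can carry a 16-byte system descriptor
 * since 0x28 + 15 = 0x37 < 0x3f, while selector 0x38 cannot because
 * 0x38 + 15 = 0x47 is out of bounds.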
*/3426 return iemRaiseGeneralProtectionFault(pIemCpu, uSel & (X86_SEL_MASK | X86_SEL_LDT));3427 }3428 }3429 return rcStrict;3430 }3431 3432 3433 /**3434 * Marks the selector descriptor as accessed (only non-system descriptors).3435 *3436 * This function ASSUMES that iemMemFetchSelDesc has been called previously and3437 * will therefore skip the limit checks.3438 *3439 * @returns Strict VBox status code.3440 * @param pIemCpu The IEM per CPU.3441 * @param uSel The selector.3442 */3443 static VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel)3444 {3445 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);3446 3447 /*3448 * Get the selector table base and check bounds.3449 */3450 RTGCPTR GCPtr = uSel & X86_SEL_LDT3451 ? pCtx->ldtrHid.u64Base3452 : pCtx->gdtr.pGdt;3453 GCPtr += uSel & X86_SEL_MASK;3454 GCPtr += 2 + 2;3455 uint32_t volatile *pu32; /** @todo Does the CPU do a 32-bit or 8-bit access here? */3456 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);3457 if (rcStrict == VINF_SUCCESS)3458 {3459 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, i.e. bit 0 of the type byte at descriptor offset 5, which is bit 8 of this dword. */3460 3461 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);3462 }3463 3464 return rcStrict;3465 }3466 3467 /** @} */3468 3469 3470 19 /** @name Misc Helpers 3471 20 * @{ … … 3497 46 3498 47 /** @} */ 3499 3500 48 3501 49 /** @name C Implementations … … 5656 2204 /** @} */ 5657 2205 5658 5659 /** @name "Microcode" macros.5660 *5661 * The idea is that we should be able to use the same code to interpret5662 * instructions as well as recompiler instructions. Thus this obfuscation.5663 *5664 * @{5665 */5666 #define IEM_MC_BEGIN(cArgs, cLocals) {5667 #define IEM_MC_END() }5668 #define IEM_MC_PAUSE() do {} while (0)5669 #define IEM_MC_CONTINUE() do {} while (0)5670 5671 /** Internal macro. 
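 * Evaluates an expression yielding a strict status code and bails out of the
 * current IEM_MC block on anything other than VINF_SUCCESS.
 *
 * (A hedged sketch of how the IEM_MC_* pieces below are meant to compose into
 * an instruction body -- illustrative only, not a decoder function from this
 * changeset:
 * @code
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 * @endcode )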
/** Internal macro. */
#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
    do \
    { \
        VBOXSTRICTRC rcStrict2 = a_Expr; \
        if (rcStrict2 != VINF_SUCCESS) \
            return rcStrict2; \
    } while (0)

#define IEM_MC_ADVANCE_RIP()                iemRegUpdateRip(pIemCpu)
#define IEM_MC_REL_JMP_S8(a_i8)             IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pIemCpu, a_i8))
#define IEM_MC_REL_JMP_S16(a_i16)           IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
#define IEM_MC_REL_JMP_S32(a_i32)           IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
#define IEM_MC_SET_RIP_U16(a_u16NewIP)      IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
#define IEM_MC_SET_RIP_U32(a_u32NewIP)      IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
#define IEM_MC_SET_RIP_U64(a_u64NewIP)      IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))

#define IEM_MC_RAISE_DIVIDE_ERROR()         return iemRaiseDivideError(pIemCpu)

#define IEM_MC_LOCAL(a_Type, a_Name)                        a_Type a_Name
#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value)         a_Type const a_Name = (a_Value)
#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local)                (a_pRefArg) = &(a_Local)
#define IEM_MC_ARG(a_Type, a_Name, a_iArg)                  a_Type a_Name
#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg)   a_Type const a_Name = (a_Value)
#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
    uint32_t a_Name; \
    uint32_t *a_pName = &a_Name
#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
    do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)

#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst)       (a_VarOrArg) = (a_CVariableOrConst)

#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg)          (a_u8Dst)  = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg)  (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg)  (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg)  (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg)        (a_u16Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg)        (a_u32Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg)        (a_u64Dst) = iemGRegFetchU64(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg)        (a_u16Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg)     (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg)     (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_EFLAGS(a_EFlags)                   (a_EFlags) = (pIemCpu)->CTX_SUFF(pCtx)->eflags.u

#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value)        *iemGRegRefU8(pIemCpu, (a_iGReg)) = (a_u8Value)
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)      *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value)      *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value)      *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)

#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg)           (a_pu8Dst)  = iemGRegRefU8(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg)         (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
 *        commit. */
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg)         (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg)         (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_EFLAGS(a_pEFlags)                    (a_pEFlags) = &(pIemCpu)->CTX_SUFF(pCtx)->eflags.u

#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value)          *(uint8_t  *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value)        *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg += (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value)        *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)

#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value)          *(uint8_t  *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value)        *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
    do { \
        uint32_t *pu32Reg = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg)); \
        *pu32Reg -= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
    } while (0)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value)        *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)

#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg)   (a_u8Value)  += iemGRegFetchU8( pIemCpu, (a_iGReg))
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg))


#define IEM_MC_SET_EFL_BIT(a_fBit)                      do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
#define IEM_MC_CLEAR_EFL_BIT(a_fBit)                    do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
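
/*
 * Editor's note: a small illustration (not part of the changeset) of the
 * 64-bit rule the U32 store/arithmetic macros above implement: a 32-bit
 * GPR write zero-extends into bits 63:32, so the high dword has to be
 * cleared explicitly when poking the 64-bit register file through a
 * uint32_t pointer (pu32Reg[1] on little-endian hosts).
 */
#if 0
uint64_t  u64Reg  = UINT64_C(0xdeadbeef00000001); /* stale high dword */
uint32_t *pu32Reg = (uint32_t *)&u64Reg;
*pu32Reg  += 1;     /* 32-bit arithmetic on the low dword */
pu32Reg[1] = 0;     /* emulate the zero-extending register write */
Assert(u64Reg == UINT64_C(2));
#endif
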

#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint8_t u8Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u16Dst) = u8Tmp; \
    } while (0)
#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint8_t u8Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u32Dst) = u8Tmp; \
    } while (0)
#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint8_t u8Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pIemCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u64Dst) = u8Tmp; \
    } while (0)
#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint16_t u16Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u32Dst) = u16Tmp; \
    } while (0)
#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint16_t u16Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u64Dst) = u16Tmp; \
    } while (0)
#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    do { \
        uint32_t u32Tmp; \
        IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
        (a_u64Dst) = u32Tmp; \
    } while (0)

#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pIemCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))

#define IEM_MC_PUSH_U16(a_u16Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pIemCpu, (a_u16Value)))
#define IEM_MC_PUSH_U32(a_u32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pIemCpu, (a_u32Value)))
#define IEM_MC_PUSH_U64(a_u64Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pIemCpu, (a_u64Value)))

#define IEM_MC_POP_U16(a_pu16Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pIemCpu, (a_pu16Value)))
#define IEM_MC_POP_U32(a_pu32Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pIemCpu, (a_pu32Value)))
#define IEM_MC_POP_U64(a_pu64Value) \
    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pIemCpu, (a_pu64Value)))
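
/*
 * Editor's note: an illustrative sketch (not part of the changeset) of the
 * stack microcode ops above; roughly how a 16-bit PUSH AX could be written.
 * The function name is hypothetical and operand-size dispatch is omitted.
 */
#if 0
FNIEMOP_DEF(iemOpSketch_push_ax)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
    IEM_MC_PUSH_U16(u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif
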
/** Maps guest memory for direct or bounce buffered access.
 * The purpose is to pass it to an operand implementation, thus the a_iArg.
 * @remarks May return.
 */
#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))

/** Maps guest memory for direct or bounce buffered access.
 * The purpose is to pass it to an operand implementation, thus the a_iArg.
 * @remarks May return.
 */
#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))

/** Commits the memory and unmaps the guest memory.
 * @remarks May return.
 */
#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
    IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pIemCpu, (a_pvMem), (a_fAccess)))

/** Calculates the effective address from the R/M byte. */
#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm) \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), &(a_GCPtrEff)))

#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1)             (a_pfn)((a0), (a1))
#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2)         (a_pfn)((a0), (a1), (a2))
#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3)    (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))

/**
 * Defers the rest of the instruction emulation to a C implementation routine
 * and returns, only taking the standard parameters.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
 */
#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl)             return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)

/**
 * Defers the rest of the instruction emulation to a C implementation routine
 * and returns, taking one argument in addition to the standard ones.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The argument.
 */
#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0)         return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)

/**
 * Defers the rest of the instruction emulation to a C implementation routine
 * and returns, taking two arguments in addition to the standard ones.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 */
#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1)     return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)

/**
 * Defers the rest of the instruction emulation to a C implementation routine
 * and returns, taking three arguments in addition to the standard ones.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 * @param   a2              The third extra argument.
 */
#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)

/**
 * Defers the rest of the instruction emulation to a C implementation routine
 * and returns, taking five arguments in addition to the standard ones.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 * @param   a2              The third extra argument.
 * @param   a3              The fourth extra argument.
 * @param   a4              The fifth extra argument.
 */
#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)

/**
 * Defers the entire instruction emulation to a C implementation routine and
 * returns, only taking the standard parameters.
 *
 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
 */
#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl)         (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode)
/**
 * Defers the entire instruction emulation to a C implementation routine and
 * returns, taking one argument in addition to the standard ones.
 *
 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The argument.
 */
#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0)     (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0)

/**
 * Defers the entire instruction emulation to a C implementation routine and
 * returns, taking two arguments in addition to the standard ones.
 *
 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
 *
 * @param   a_pfnCImpl      The pointer to the C routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 */
#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)

#define IEM_MC_IF_EFL_BIT_SET(a_fBit)               if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) {
#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits)         if (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBits)) {
#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
    if (   !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
        != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
    if (   (pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit)) \
        ||    !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit1)) \
           != !!(pIemCpu->CTX_SUFF(pCtx)->eflags.u & (a_fBit2)) ) {
#define IEM_MC_IF_CX_IS_NZ()                        if (pIemCpu->CTX_SUFF(pCtx)->cx != 0) {
#define IEM_MC_IF_ECX_IS_NZ()                       if (pIemCpu->CTX_SUFF(pCtx)->ecx != 0) {
#define IEM_MC_IF_RCX_IS_NZ()                       if (pIemCpu->CTX_SUFF(pCtx)->rcx != 0) {
#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
        && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
        && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
        && (pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->cx != 0 \
        && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->ecx != 0 \
        && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    if (   pIemCpu->CTX_SUFF(pCtx)->rcx != 0 \
        && !(pIemCpu->CTX_SUFF(pCtx)->eflags.u & a_fBit)) {
#define IEM_MC_IF_LOCAL_IS_Z(a_Local)               if ((a_Local) == 0) {
#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo)   if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
#define IEM_MC_ELSE()                               } else {
#define IEM_MC_ENDIF()                              } do {} while (0)

/** @} */
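
/*
 * Editor's note: an illustrative sketch (not part of the changeset) of the
 * conditional microcode above; roughly how JZ rel8 could be written.  The
 * function name is hypothetical.
 */
#if 0
FNIEMOP_DEF(iemOpSketch_jz_Jb)
{
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Imm);
    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
        IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_ELSE()
        IEM_MC_ADVANCE_RIP();
    IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif
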

/** @name   Opcode Debug Helpers.
 * @{
 */
#ifdef DEBUG
# define IEMOP_MNEMONIC(a_szMnemonic) \
    Log2(("decode - %04x:%08RGv %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic))
# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
    Log2(("decode - %04x:%08RGv %s %s\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, a_szMnemonic, a_szOps))
#else
# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
#endif

/** @} */


/** @name   Opcode Helpers.
 * @{
 */

/** The instruction allows no lock prefixing (in this encoding), throw #UD if
 * lock prefixed. */
#define IEMOP_HLP_NO_LOCK_PREFIX() \
    do \
    { \
        if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK) \
            return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    } while (0)

/** The instruction is not available in 64-bit mode, throw #UD if we're in
 * 64-bit mode. */
#define IEMOP_HLP_NO_64BIT() \
    do \
    { \
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
            return IEMOP_RAISE_INVALID_OPCODE(); \
    } while (0)

/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
    do \
    { \
        if (pIemCpu->enmCpuMode == IEMMODE_64BIT) \
            iemRecalEffOpSize64Default(pIemCpu); \
    } while (0)



/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * @return  Strict VBox status code.
 * @param   pIemCpu             The IEM per CPU data.
 * @param   bRm                 The ModRM byte.
 * @param   pGCPtrEff           Where to return the effective address.
 */
static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, PRTGCPTR pGCPtrEff)
{
    LogFlow(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
    PCCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
#define SET_SS_DEF() \
    do \
    { \
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pIemCpu->iEffSeg = X86_SREG_SS; \
    } while (0)

    /** @todo Check the effective address size crap! */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:  u16EffAddr = 0;                                      break;
                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(pIemCpu, &u16EffAddr); break;
                    case 2:  IEM_OPCODE_GET_NEXT_U16(pIemCpu, &u16EffAddr);       break;
                    default: AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pCtx->bx + pCtx->si;               break;
                    case 1: u16EffAddr += pCtx->bx + pCtx->di;               break;
                    case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pCtx->si;                          break;
                    case 5: u16EffAddr += pCtx->di;                          break;
                    case 6: u16EffAddr += pCtx->bp;            SET_SS_DEF(); break;
                    case 7: u16EffAddr += pCtx->bx;                          break;
                }
            }

            *pGCPtrEff = u16EffAddr;
            LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#06RGv\n", *pGCPtrEff));
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32EffAddr;

            /* Handle the disp32 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
                IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32EffAddr);
            else
            {
                /* Get the register (or SIB) value. */
                switch ((bRm & X86_MODRM_RM_MASK))
                {
                    case 0: u32EffAddr = pCtx->eax; break;
                    case 1: u32EffAddr = pCtx->ecx; break;
                    case 2: u32EffAddr = pCtx->edx; break;
                    case 3: u32EffAddr = pCtx->ebx; break;
                    case 4: /* SIB */
                    {
                        uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);

                        /* Get the index and scale it. */
                        switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        {
                            case 0: u32EffAddr = pCtx->eax; break;
                            case 1: u32EffAddr = pCtx->ecx; break;
                            case 2: u32EffAddr = pCtx->edx; break;
                            case 3: u32EffAddr = pCtx->ebx; break;
                            case 4: u32EffAddr = 0; /* none */ break;
                            case 5: u32EffAddr = pCtx->ebp; break;
                            case 6: u32EffAddr = pCtx->esi; break;
                            case 7: u32EffAddr = pCtx->edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                        /* add base */
                        switch (bSib & X86_SIB_BASE_MASK)
                        {
                            case 0: u32EffAddr += pCtx->eax; break;
                            case 1: u32EffAddr += pCtx->ecx; break;
                            case 2: u32EffAddr += pCtx->edx; break;
                            case 3: u32EffAddr += pCtx->ebx; break;
                            case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
                            case 5:
                                if ((bRm & X86_MODRM_MOD_MASK) != 0)
                                {
                                    u32EffAddr += pCtx->ebp;
                                    SET_SS_DEF();
                                }
                                else
                                {
                                    uint32_t u32Disp;
                                    IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
                                    u32EffAddr += u32Disp;
                                }
                                break;
                            case 6: u32EffAddr += pCtx->esi; break;
                            case 7: u32EffAddr += pCtx->edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        break;
                    }
                    case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
                    case 6: u32EffAddr = pCtx->esi; break;
                    case 7: u32EffAddr = pCtx->edi; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }

                /* Get and add the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:
                        break;
                    case 1:
                    {
                        int8_t i8Disp;
                        IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
                        u32EffAddr += i8Disp;
                        break;
                    }
                    case 2:
                    {
                        uint32_t u32Disp;
                        IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
                        u32EffAddr += u32Disp;
                        break;
                    }
                    default:
                        AssertFailedReturn(VERR_INTERNAL_ERROR_2); /* (caller checked for these) */
                }

            }
            if (pIemCpu->enmEffAddrMode == IEMMODE_32BIT)
                *pGCPtrEff = u32EffAddr;
            else
            {
                Assert(pIemCpu->enmEffAddrMode == IEMMODE_16BIT);
                *pGCPtrEff = u32EffAddr & UINT16_MAX;
            }
            LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64EffAddr;

            /* Handle the rip+disp32 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            {
                IEM_OPCODE_GET_NEXT_S32_SX_U64(pIemCpu, &u64EffAddr);
                u64EffAddr += pCtx->rip + pIemCpu->offOpcode;
            }
            else
            {
                /* Get the register (or SIB) value. */
                switch ((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB)
                {
                    case  0: u64EffAddr = pCtx->rax; break;
                    case  1: u64EffAddr = pCtx->rcx; break;
                    case  2: u64EffAddr = pCtx->rdx; break;
                    case  3: u64EffAddr = pCtx->rbx; break;
                    case  5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
                    case  6: u64EffAddr = pCtx->rsi; break;
                    case  7: u64EffAddr = pCtx->rdi; break;
                    case  8: u64EffAddr = pCtx->r8;  break;
                    case  9: u64EffAddr = pCtx->r9;  break;
                    case 10: u64EffAddr = pCtx->r10; break;
                    case 11: u64EffAddr = pCtx->r11; break;
                    case 13: u64EffAddr = pCtx->r13; break;
                    case 14: u64EffAddr = pCtx->r14; break;
                    case 15: u64EffAddr = pCtx->r15; break;
                    /* SIB */
                    case 4:
                    case 12:
                    {
                        uint8_t bSib; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bSib);

                        /* Get the index and scale it. */
                        switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pIemCpu->uRexIndex)
                        {
                            case  0: u64EffAddr = pCtx->rax; break;
                            case  1: u64EffAddr = pCtx->rcx; break;
                            case  2: u64EffAddr = pCtx->rdx; break;
                            case  3: u64EffAddr = pCtx->rbx; break;
                            case  4: u64EffAddr = 0; /* none */ break;
                            case  5: u64EffAddr = pCtx->rbp; break;
                            case  6: u64EffAddr = pCtx->rsi; break;
                            case  7: u64EffAddr = pCtx->rdi; break;
                            case  8: u64EffAddr = pCtx->r8;  break;
                            case  9: u64EffAddr = pCtx->r9;  break;
                            case 10: u64EffAddr = pCtx->r10; break;
                            case 11: u64EffAddr = pCtx->r11; break;
                            case 12: u64EffAddr = pCtx->r12; break;
                            case 13: u64EffAddr = pCtx->r13; break;
                            case 14: u64EffAddr = pCtx->r14; break;
                            case 15: u64EffAddr = pCtx->r15; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                        /* add base */
                        switch ((bSib & X86_SIB_BASE_MASK) | pIemCpu->uRexB)
                        {
                            case  0: u64EffAddr += pCtx->rax; break;
                            case  1: u64EffAddr += pCtx->rcx; break;
                            case  2: u64EffAddr += pCtx->rdx; break;
                            case  3: u64EffAddr += pCtx->rbx; break;
                            case  4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
                            case  6: u64EffAddr += pCtx->rsi; break;
                            case  7: u64EffAddr += pCtx->rdi; break;
                            case  8: u64EffAddr += pCtx->r8;  break;
                            case  9: u64EffAddr += pCtx->r9;  break;
                            case 10: u64EffAddr += pCtx->r10; break;
                            case 11: u64EffAddr += pCtx->r11; break;
                            case 12: u64EffAddr += pCtx->r12; break;
                            case 14: u64EffAddr += pCtx->r14; break;
                            case 15: u64EffAddr += pCtx->r15; break;
                            /* complicated encodings */
                            case 5:
                            case 13:
                                if ((bRm & X86_MODRM_MOD_MASK) != 0)
                                {
                                    if (!pIemCpu->uRexB)
                                    {
                                        u64EffAddr += pCtx->rbp;
                                        SET_SS_DEF();
                                    }
                                    else
                                        u64EffAddr += pCtx->r13;
                                }
                                else
                                {
                                    uint32_t u32Disp;
                                    IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
                                    u64EffAddr += (int32_t)u32Disp;
                                }
                                break;
                        }
                        break;
                    }
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }

                /* Get and add the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:
                        break;
                    case 1:
                    {
                        int8_t i8Disp;
                        IEM_OPCODE_GET_NEXT_S8(pIemCpu, &i8Disp);
                        u64EffAddr += i8Disp;
                        break;
                    }
                    case 2:
                    {
                        uint32_t u32Disp;
                        IEM_OPCODE_GET_NEXT_U32(pIemCpu, &u32Disp);
                        u64EffAddr += (int32_t)u32Disp;
                        break;
                    }
                    IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
                }

            }
            if (pIemCpu->enmEffAddrMode == IEMMODE_64BIT)
                *pGCPtrEff = u64EffAddr;
            else
                *pGCPtrEff = u64EffAddr & UINT16_MAX;
            LogFlow(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
            return VINF_SUCCESS;
        }
    }

    AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}

/** @} */
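
/*
 * Editor's note: a worked decode example (not part of the changeset) for
 * iemOpHlpCalcRmEffAddr above, assuming 32-bit addressing:
 *
 *      bRm   = 0x44 -> mod=01, reg=000, rm=100 (a SIB byte and disp8 follow)
 *      bSib  = 0x88 -> scale=10 (x4), index=001 (ecx), base=000 (eax)
 *      disp8 = 0x10
 *
 * Effective address = eax + ecx*4 + 0x10, with the default segment staying
 * DS (no esp/ebp base, so SET_SS_DEF() is never invoked).
 */
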

/*
 * Include the instructions
 */
#include "IEMAllInstructions.cpp.h"




#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)

/**
 * Sets up execution verification mode.
 */
static void iemExecVerificationModeSetup(PIEMCPU pIemCpu)
{
    PCPUMCTX pOrgCtx = pIemCpu->CTX_SUFF(pCtx);
    pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */

    /*
     * Switch state.
     */
    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
    {
        static CPUMCTX s_DebugCtx; /* Ugly! */

        s_DebugCtx = *pOrgCtx;
        pIemCpu->CTX_SUFF(pCtx) = &s_DebugCtx;
    }

    /*
     * See if there is an interrupt pending in TRPM and inject it if we can.
     */
    PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    if (   pOrgCtx->eflags.Bits.u1IF
        && TRPMHasTrap(pVCpu)
        //&& TRPMIsSoftwareInterrupt(pVCpu)
        && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
    {
        Log(("Injecting trap %#x\n", TRPMGetTrapNo(pVCpu)));
        iemCImpl_int(pIemCpu, 0, TRPMGetTrapNo(pVCpu), false);
        if (IEM_VERIFICATION_ENABLED(pIemCpu))
            TRPMResetTrap(pVCpu);
    }

    /*
     * Reset the counters.
     */
    pIemCpu->cIOReads     = 0;
    pIemCpu->cIOWrites    = 0;
    pIemCpu->fMulDivHack  = false;
    pIemCpu->fShiftOfHack = false;

    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
    {
        /*
         * Free all verification records.
         */
        PIEMVERIFYEVTREC pEvtRec = pIemCpu->pIemEvtRecHead;
        pIemCpu->pIemEvtRecHead = NULL;
        pIemCpu->ppIemEvtRecNext = &pIemCpu->pIemEvtRecHead;
        do
        {
            while (pEvtRec)
            {
                PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
                pEvtRec->pNext = pIemCpu->pFreeEvtRec;
                pIemCpu->pFreeEvtRec = pEvtRec;
                pEvtRec = pNext;
            }
            pEvtRec = pIemCpu->pOtherEvtRecHead;
            pIemCpu->pOtherEvtRecHead = NULL;
            pIemCpu->ppOtherEvtRecNext = &pIemCpu->pOtherEvtRecHead;
        } while (pEvtRec);
    }
}


/**
 * Allocates an event record.
 * @returns Pointer to a record.
 */
static PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu)
{
    if (IEM_VERIFICATION_ENABLED(pIemCpu))
        return NULL;

    PIEMVERIFYEVTREC pEvtRec = pIemCpu->pFreeEvtRec;
    if (pEvtRec)
        pIemCpu->pFreeEvtRec = pEvtRec->pNext;
    else
    {
        if (!pIemCpu->ppIemEvtRecNext)
            return NULL; /* Too early (fake PCIBIOS), ignore notification. */

        pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird */, sizeof(*pEvtRec));
        if (!pEvtRec)
            return NULL;
    }
    pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
    pEvtRec->pNext    = NULL;
    return pEvtRec;
}


/**
 * IOMMMIORead notification.
 */
VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (!pVCpu)
        return;
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (!pEvtRec)
        return;
    pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
    pEvtRec->u.RamRead.GCPhys = GCPhys;
    pEvtRec->u.RamRead.cb     = cbValue;
    pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
    *pIemCpu->ppOtherEvtRecNext = pEvtRec;
}


/**
 * IOMMMIOWrite notification.
 */
VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (!pVCpu)
        return;
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (!pEvtRec)
        return;
    pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
    pEvtRec->u.RamWrite.GCPhys = GCPhys;
    pEvtRec->u.RamWrite.cb     = cbValue;
    pEvtRec->u.RamWrite.ab[0]  = RT_BYTE1(u32Value);
    pEvtRec->u.RamWrite.ab[1]  = RT_BYTE2(u32Value);
    pEvtRec->u.RamWrite.ab[2]  = RT_BYTE3(u32Value);
    pEvtRec->u.RamWrite.ab[3]  = RT_BYTE4(u32Value);
    pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
    *pIemCpu->ppOtherEvtRecNext = pEvtRec;
}


/**
 * IOMIOPortRead notification.
 */
VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (!pVCpu)
        return;
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (!pEvtRec)
        return;
    pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
    pEvtRec->u.IOPortRead.Port    = Port;
    pEvtRec->u.IOPortRead.cbValue = cbValue;
    pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
    *pIemCpu->ppOtherEvtRecNext = pEvtRec;
}

/**
 * IOMIOPortWrite notification.
 */
VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (!pVCpu)
        return;
    PIEMCPU pIemCpu = &pVCpu->iem.s;
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (!pEvtRec)
        return;
    pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
    pEvtRec->u.IOPortWrite.Port     = Port;
    pEvtRec->u.IOPortWrite.cbValue  = cbValue;
    pEvtRec->u.IOPortWrite.u32Value = u32Value;
    pEvtRec->pNext = *pIemCpu->ppOtherEvtRecNext;
    *pIemCpu->ppOtherEvtRecNext = pEvtRec;
}


VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
{
    AssertFailed();
}


VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
{
    AssertFailed();
}


/**
 * Fakes and records an I/O port read.
 *
 * @returns VINF_SUCCESS.
 * @param   pIemCpu             The IEM per CPU data.
 * @param   Port                The I/O port.
 * @param   pu32Value           Where to store the fake value.
 * @param   cbValue             The size of the access.
 */
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (pEvtRec)
    {
        pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
        pEvtRec->u.IOPortRead.Port    = Port;
        pEvtRec->u.IOPortRead.cbValue = cbValue;
        pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
        *pIemCpu->ppIemEvtRecNext = pEvtRec;
    }
    pIemCpu->cIOReads++;
    *pu32Value = 0xffffffff;
    return VINF_SUCCESS;
}


/**
 * Fakes and records an I/O port write.
 *
 * @returns VINF_SUCCESS.
 * @param   pIemCpu             The IEM per CPU data.
 * @param   Port                The I/O port.
 * @param   u32Value            The value being written.
 * @param   cbValue             The size of the access.
 */
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
    PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pIemCpu);
    if (pEvtRec)
    {
        pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
        pEvtRec->u.IOPortWrite.Port     = Port;
        pEvtRec->u.IOPortWrite.cbValue  = cbValue;
        pEvtRec->u.IOPortWrite.u32Value = u32Value;
        pEvtRec->pNext = *pIemCpu->ppIemEvtRecNext;
        *pIemCpu->ppIemEvtRecNext = pEvtRec;
    }
    pIemCpu->cIOWrites++;
    return VINF_SUCCESS;
}


/**
 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
 * dump to the assertion info.
 *
 * @param   pEvtRec             The record to dump.
 */
static void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
{
    switch (pEvtRec->enmEvent)
    {
        case IEMVERIFYEVENT_IOPORT_READ:
            RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
                            pEvtRec->u.IOPortRead.Port,
                            pEvtRec->u.IOPortRead.cbValue);
            break;
        case IEMVERIFYEVENT_IOPORT_WRITE:
            RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
                            pEvtRec->u.IOPortWrite.Port,
                            pEvtRec->u.IOPortWrite.cbValue,
                            pEvtRec->u.IOPortWrite.u32Value);
            break;
        case IEMVERIFYEVENT_RAM_READ:
            RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
                            pEvtRec->u.RamRead.GCPhys,
                            pEvtRec->u.RamRead.cb);
            break;
        case IEMVERIFYEVENT_RAM_WRITE:
            RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
                            pEvtRec->u.RamWrite.GCPhys,
                            pEvtRec->u.RamWrite.cb,
                            (int)pEvtRec->u.RamWrite.cb,
                            pEvtRec->u.RamWrite.ab);
            break;
        default:
            AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
            break;
    }
}


/**
 * Raises an assertion on the specified records, showing the given message with
 * a dump of both records attached.
 *
 * @param   pIemCpu             The IEM per CPU data.
 * @param   pEvtRec1            The first record.
 * @param   pEvtRec2            The second record.
 * @param   pszMsg              The message explaining why we're asserting.
 */
static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
{
    RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
    iemVerifyAssertAddRecordDump(pEvtRec1);
    iemVerifyAssertAddRecordDump(pEvtRec2);
    iemOpStubMsg2(pIemCpu);
    RTAssertPanic();
}


/**
 * Raises an assertion on the specified record, showing the given message with
 * a record dump attached.
 *
 * @param   pIemCpu             The IEM per CPU data.
 * @param   pEvtRec             The record to dump.
 * @param   pszMsg              The message explaining why we're asserting.
 */
static void iemVerifyAssertRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
{
    RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
    iemVerifyAssertAddRecordDump(pEvtRec);
    iemOpStubMsg2(pIemCpu);
    RTAssertPanic();
}


/**
 * Verifies a write record.
 *
 * @param   pIemCpu             The IEM per CPU data.
 * @param   pEvtRec             The write record.
 */
static void iemVerifyWriteRecord(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec)
{
    uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
    int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
    if (   RT_FAILURE(rc)
        || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
    {
        /* fend off ins */
        if (   !pIemCpu->cIOReads
            || pEvtRec->u.RamWrite.ab[0] != 0xcc
            || (   pEvtRec->u.RamWrite.cb != 1
                && pEvtRec->u.RamWrite.cb != 2
                && pEvtRec->u.RamWrite.cb != 4) )
        {
            RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
            RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
            RTAssertMsg2Add("REM: %.*Rhxs\n"
                            "IEM: %.*Rhxs\n",
                            pEvtRec->u.RamWrite.cb, abBuf,
                            pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
            iemVerifyAssertAddRecordDump(pEvtRec);
            iemOpStubMsg2(pIemCpu);
            RTAssertPanic();
        }
    }

}

/**
 * Performs the post-execution verification checks.
 */
static void iemExecVerificationModeCheck(PIEMCPU pIemCpu)
{
    if (IEM_VERIFICATION_ENABLED(pIemCpu))
        return;

    /*
     * Switch back the state.
     */
    PCPUMCTX pOrgCtx   = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
    PCPUMCTX pDebugCtx = pIemCpu->CTX_SUFF(pCtx);
    Assert(pOrgCtx != pDebugCtx);
    pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;

    /*
     * Execute the instruction in REM.
     */
    int rc = REMR3EmulateInstruction(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu));
    AssertRC(rc);

    /*
     * Compare the register states.
     */
    unsigned cDiffs = 0;
    if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
    {
        Log(("REM and IEM end up with different registers!\n"));

# define CHECK_FIELD(a_Field) \
    do \
    { \
        if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
        { \
            switch (sizeof(pOrgCtx->a_Field)) \
            { \
                case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
                case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
                case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
                case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
                default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
            } \
            cDiffs++; \
        } \
    } while (0)

# define CHECK_BIT_FIELD(a_Field) \
    do \
    { \
        if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
        { \
            RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
            cDiffs++; \
        } \
    } while (0)

        if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
        {
            if (pIemCpu->cInstructions != 1)
            {
                RTAssertMsg2Weak(" the FPU state differs\n");
                cDiffs++;
            }
            else
                RTAssertMsg2Weak(" the FPU state differs - happens the first time...\n");
        }
        CHECK_FIELD(rip);
        uint32_t fFlagsMask = UINT32_MAX;
        if (pIemCpu->fMulDivHack)
            fFlagsMask &= ~(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
        if (pIemCpu->fShiftOfHack)
            fFlagsMask &= ~(X86_EFL_OF);
        if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
        {
            RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
            CHECK_BIT_FIELD(rflags.Bits.u1CF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
            CHECK_BIT_FIELD(rflags.Bits.u1PF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
            CHECK_BIT_FIELD(rflags.Bits.u1AF);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
            CHECK_BIT_FIELD(rflags.Bits.u1ZF);
            CHECK_BIT_FIELD(rflags.Bits.u1SF);
            CHECK_BIT_FIELD(rflags.Bits.u1TF);
            CHECK_BIT_FIELD(rflags.Bits.u1IF);
            CHECK_BIT_FIELD(rflags.Bits.u1DF);
            CHECK_BIT_FIELD(rflags.Bits.u1OF);
            CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
            CHECK_BIT_FIELD(rflags.Bits.u1NT);
            CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
            CHECK_BIT_FIELD(rflags.Bits.u1RF);
            CHECK_BIT_FIELD(rflags.Bits.u1VM);
            CHECK_BIT_FIELD(rflags.Bits.u1AC);
            CHECK_BIT_FIELD(rflags.Bits.u1VIF);
            CHECK_BIT_FIELD(rflags.Bits.u1VIP);
            CHECK_BIT_FIELD(rflags.Bits.u1ID);
        }

        if (pIemCpu->cIOReads != 1)
            CHECK_FIELD(rax);
        CHECK_FIELD(rcx);
        CHECK_FIELD(rdx);
        CHECK_FIELD(rbx);
        CHECK_FIELD(rsp);
        CHECK_FIELD(rbp);
        CHECK_FIELD(rsi);
        CHECK_FIELD(rdi);
        CHECK_FIELD(r8);
        CHECK_FIELD(r9);
        CHECK_FIELD(r10);
        CHECK_FIELD(r11);
        CHECK_FIELD(r12);
        CHECK_FIELD(r13);
        CHECK_FIELD(r14);
        CHECK_FIELD(r15);
        CHECK_FIELD(cs);
        CHECK_FIELD(csHid.u64Base);
        CHECK_FIELD(csHid.u32Limit);
        CHECK_FIELD(csHid.Attr.u);
        CHECK_FIELD(ss);
        CHECK_FIELD(ssHid.u64Base);
        CHECK_FIELD(ssHid.u32Limit);
        CHECK_FIELD(ssHid.Attr.u);
        CHECK_FIELD(ds);
        CHECK_FIELD(dsHid.u64Base);
        CHECK_FIELD(dsHid.u32Limit);
        CHECK_FIELD(dsHid.Attr.u);
        CHECK_FIELD(es);
        CHECK_FIELD(esHid.u64Base);
        CHECK_FIELD(esHid.u32Limit);
        CHECK_FIELD(esHid.Attr.u);
        CHECK_FIELD(fs);
        CHECK_FIELD(fsHid.u64Base);
        CHECK_FIELD(fsHid.u32Limit);
        CHECK_FIELD(fsHid.Attr.u);
        CHECK_FIELD(gs);
        CHECK_FIELD(gsHid.u64Base);
        CHECK_FIELD(gsHid.u32Limit);
        CHECK_FIELD(gsHid.Attr.u);
        CHECK_FIELD(cr0);
        CHECK_FIELD(cr2);
        CHECK_FIELD(cr3);
        CHECK_FIELD(cr4);
        CHECK_FIELD(dr[0]);
        CHECK_FIELD(dr[1]);
        CHECK_FIELD(dr[2]);
        CHECK_FIELD(dr[3]);
        CHECK_FIELD(dr[6]);
        CHECK_FIELD(dr[7]);
        CHECK_FIELD(gdtr.cbGdt);
        CHECK_FIELD(gdtr.pGdt);
        CHECK_FIELD(idtr.cbIdt);
        CHECK_FIELD(idtr.pIdt);
        CHECK_FIELD(ldtr);
        CHECK_FIELD(ldtrHid.u64Base);
        CHECK_FIELD(ldtrHid.u32Limit);
        CHECK_FIELD(ldtrHid.Attr.u);
        CHECK_FIELD(tr);
        CHECK_FIELD(trHid.u64Base);
        CHECK_FIELD(trHid.u32Limit);
        CHECK_FIELD(trHid.Attr.u);
        CHECK_FIELD(SysEnter.cs);
        CHECK_FIELD(SysEnter.eip);
        CHECK_FIELD(SysEnter.esp);
        CHECK_FIELD(msrEFER);
        CHECK_FIELD(msrSTAR);
        CHECK_FIELD(msrPAT);
        CHECK_FIELD(msrLSTAR);
        CHECK_FIELD(msrCSTAR);
        CHECK_FIELD(msrSFMASK);
        CHECK_FIELD(msrKERNELGSBASE);

        if (cDiffs != 0)
        {
            RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
            iemOpStubMsg2(pIemCpu);
            RTAssertPanic();
        }
# undef CHECK_FIELD
# undef CHECK_BIT_FIELD
    }

    /*
     * If the register state compared fine, check the verification event
     * records.
     */
    if (cDiffs == 0)
    {
        /*
         * Compare verification event records.
         *  - I/O port accesses should be a 1:1 match.
         */
        PIEMVERIFYEVTREC pIemRec   = pIemCpu->pIemEvtRecHead;
        PIEMVERIFYEVTREC pOtherRec = pIemCpu->pOtherEvtRecHead;
        while (pIemRec && pOtherRec)
        {
            /* Since we might miss RAM writes and reads, ignore reads and check
               that any written memory is the same; extra write records are
               verified below. */
            while (   IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
                   && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
                   && pIemRec->pNext)
            {
                if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
                    iemVerifyWriteRecord(pIemCpu, pIemRec);
                pIemRec = pIemRec->pNext;
            }

            /* Do the compare. */
            if (pIemRec->enmEvent != pOtherRec->enmEvent)
            {
                iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Type mismatches");
                break;
            }
            bool fEquals;
            switch (pIemRec->enmEvent)
            {
                case IEMVERIFYEVENT_IOPORT_READ:
                    fEquals = pIemRec->u.IOPortRead.Port      == pOtherRec->u.IOPortRead.Port
                           && pIemRec->u.IOPortRead.cbValue   == pOtherRec->u.IOPortRead.cbValue;
                    break;
                case IEMVERIFYEVENT_IOPORT_WRITE:
                    fEquals = pIemRec->u.IOPortWrite.Port     == pOtherRec->u.IOPortWrite.Port
                           && pIemRec->u.IOPortWrite.cbValue  == pOtherRec->u.IOPortWrite.cbValue
                           && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
                    break;
                case IEMVERIFYEVENT_RAM_READ:
                    fEquals = pIemRec->u.RamRead.GCPhys       == pOtherRec->u.RamRead.GCPhys
                           && pIemRec->u.RamRead.cb           == pOtherRec->u.RamRead.cb;
                    break;
                case IEMVERIFYEVENT_RAM_WRITE:
                    fEquals = pIemRec->u.RamWrite.GCPhys      == pOtherRec->u.RamWrite.GCPhys
                           && pIemRec->u.RamWrite.cb          == pOtherRec->u.RamWrite.cb
                           && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
                    break;
                default:
                    fEquals = false;
                    break;
            }
            if (!fEquals)
            {
                iemVerifyAssertRecords(pIemCpu, pIemRec, pOtherRec, "Mismatch");
                break;
            }

            /* advance */
            pIemRec   = pIemRec->pNext;
            pOtherRec = pOtherRec->pNext;
        }

        /* Ignore extra writes and reads. */
        while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
        {
            if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
                iemVerifyWriteRecord(pIemCpu, pIemRec);
            pIemRec = pIemRec->pNext;
        }
        if (pIemRec != NULL)
            iemVerifyAssertRecord(pIemCpu, pIemRec, "Extra IEM record!");
        else if (pOtherRec != NULL)
            iemVerifyAssertRecord(pIemCpu, pOtherRec, "Extra Other record!");
    }
    pIemCpu->CTX_SUFF(pCtx) = pOrgCtx;
}

#else  /* !IEM_VERIFICATION_MODE || !IN_RING3 */

/* stubs */
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
    return VERR_INTERNAL_ERROR;
}

static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
{
    return VERR_INTERNAL_ERROR;
}

#endif /* !IEM_VERIFICATION_MODE || !IN_RING3 */


/**
 * Executes one instruction.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu               The current virtual CPU.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
{
    PIEMCPU pIemCpu = &pVCpu->iem.s;

#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    iemExecVerificationModeSetup(pIemCpu);
#endif
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    if (LogIs2Enabled())
    {
        char     szInstr[256];
        uint32_t cbInstr = 0;
        DBGFR3DisasInstrEx(pVCpu->pVMR3, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), &cbInstr);

        Log2(("**** "
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " %s\n"
              ,
              pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
              pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL,
              (RTSEL)pCtx->cs, (RTSEL)pCtx->ss, (RTSEL)pCtx->ds, (RTSEL)pCtx->es,
              (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, pCtx->eflags.u,
              szInstr));
    }
#endif

    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint8_t b; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
    rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    if (rcStrict == VINF_SUCCESS)
        pIemCpu->cInstructions++;
//#ifdef DEBUG
//    AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
//#endif

    /* Execute the next instruction as well if a cli, pop ss or
       mov ss, Gr has just completed successfully. */
    if (   rcStrict == VINF_SUCCESS
        && VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
        && EMGetInhibitInterruptsPC(pVCpu) == pIemCpu->CTX_SUFF(pCtx)->rip )
    {
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pIemCpu);
        if (rcStrict == VINF_SUCCESS)
        {
            IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &b);
            rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
            if (rcStrict == VINF_SUCCESS)
                pIemCpu->cInstructions++;
        }
        EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
    }

    /*
     * Assert some sanity.
     */
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    iemExecVerificationModeCheck(pIemCpu);
#endif
    return rcStrict;
}
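
/*
 * Editor's note: a sketch (assumption, not part of the changeset) of the
 * expected calling pattern for IEMExecOne, e.g. from a ring-3 execution
 * loop.  The interrupt-inhibit handling above means a cli / pop ss /
 * mov ss sequence appears atomic from the caller's point of view.
 */
#if 0
for (;;)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        break; /* informational status or error; let the caller sort it out */
}
#endif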