VirtualBox

Changeset 100731 in vbox for trunk/src/VBox/VMM


Timestamp: Jul 28, 2023 10:22:22 PM
Author: vboxsync
Message: VMM/IEM: More on recompiling branch instruction. bugref:10369

Location: trunk/src/VBox/VMM
Files: 9 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r100694 r100731  
    799799    pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
    800800#elif 1
    801     pVCpu->iem.s.pbInstrBuf = NULL;
     801    pVCpu->iem.s.pbInstrBuf      = NULL;
     802    pVCpu->iem.s.cbInstrBufTotal = 0;
    802803    RT_NOREF(cbInstr);
    803804#else
     
    10031004            if (cbDst <= cbMaxRead)
    10041005            {
     1006                pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
     1007                pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
     1008
    10051009                pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
    10061010                pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
    10071011                pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
    10081012                pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
    1009                 pVCpu->iem.s.fTbCrossedPage  |= offPg == 0;
    10101013                memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
    10111014                return;
     
    10921095            /* Update the state and probably return. */
    10931096            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
     1097            pVCpu->iem.s.fTbCrossedPage     |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
     1098            pVCpu->iem.s.GCPhysInstrBufPrev  = pVCpu->iem.s.GCPhysInstrBuf;
     1099
    10941100            pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
    10951101            pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
    10961102            pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
    1097             pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE;
     1103            pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE; /** @todo ??? */
    10981104            pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
    10991105            pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
    11001106            pVCpu->iem.s.pbInstrBuf       = NULL;
    1101             pVCpu->iem.s.fTbCrossedPage  |= offPg == 0;
    11021107            if (cbToRead == cbDst)
    11031108                return;
     
    40694074
    40704075    /* Flush the prefetch buffer. */
    4071 #ifdef IEM_WITH_CODE_TLB
    4072     pVCpu->iem.s.pbInstrBuf = NULL;
    4073 #else
    4074     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    4075 #endif
     4076    iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
    40764077
    40774078    /*
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h

    r100714 r100731  
    792792    IEMOP_HLP_NO_64BIT();
    793793    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    794     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_END_TB/*?*/,
     794    /** @todo eliminate END_TB here */
     795    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_END_TB,
    795796                                iemCImpl_pop_Sreg, X86_SREG_CS, pVCpu->iem.s.enmEffOpSize);
    796797}
     
    50185019     */
    50195020    uint8_t const iSegReg = IEM_GET_MODRM_REG_8(bRm);
    5020     if (   iSegReg > X86_SREG_GS)
     5021    if (iSegReg > X86_SREG_GS)
    50215022        IEMOP_RAISE_INVALID_OPCODE_RET(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    50225023
     
    51595160     */
    51605161    uint8_t const iSegReg = IEM_GET_MODRM_REG_8(bRm);
     5162    /** @todo r=bird: What does 8086 do here wrt CS? */
    51615163    if (   iSegReg == X86_SREG_CS
    51625164        || iSegReg > X86_SREG_GS)
     
    56825684    uint16_t u16Sel;  IEM_OPCODE_GET_NEXT_U16(&u16Sel);
    56835685    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    5684     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     5686    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_FAR
     5687                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
    56855688                                iemCImpl_callf, u16Sel, off32Seg, pVCpu->iem.s.enmEffOpSize);
    56865689}
     
    72077210    {
    72087211        case IEMMODE_16BIT:
    7209             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_iw_16, u16Imm);
     7212            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_16, u16Imm);
    72107213        case IEMMODE_32BIT:
    7211             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_iw_32, u16Imm);
     7214            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_32, u16Imm);
    72127215        case IEMMODE_64BIT:
    7213             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_iw_64, u16Imm);
     7216            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_64, u16Imm);
    72147217        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    72157218    }
     
    72287231    {
    72297232        case IEMMODE_16BIT:
    7230             IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_16);
     7233            IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_16);
    72317234        case IEMMODE_32BIT:
    7232             IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_32);
     7235            IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_32);
    72337236        case IEMMODE_64BIT:
    7234             IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retn_64);
     7237            IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_64);
    72357238        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    72367239    }
     
    75167519    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    75177520    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    7518     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, u16Imm);
     7521    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE,
     7522                                iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, u16Imm);
    75197523}
    75207524
     
    75277531    IEMOP_MNEMONIC(retf, "retf");
    75287532    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    7529     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, 0);
     7533    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE,
     7534                                iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, 0);
    75307535}
    75317536
     
    75387543    IEMOP_MNEMONIC(int3, "int3");
    75397544    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    7540     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
     7545    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     7546                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
    75417547                                iemCImpl_int, X86_XCPT_BP, IEMINT_INT3);
    75427548}
     
    75517557    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    75527558    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    7553     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
     7559    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     7560                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
    75547561                                iemCImpl_int, u8Int, IEMINT_INTN);
    75557562}
     
    75637570    IEMOP_MNEMONIC(into, "into");
    75647571    IEMOP_HLP_NO_64BIT();
    7565     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_BRANCH_COND | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
     7572    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL
     7573                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
    75667574                                iemCImpl_int, X86_XCPT_OF, IEMINT_INTO);
    75677575}
     
    75757583    IEMOP_MNEMONIC(iret, "iret");
    75767584    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    7577     IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     7585    IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     7586                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
    75787587                                iemCImpl_iret, pVCpu->iem.s.enmEffOpSize);
    75797588}
     
    1148411493        {
    1148511494            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    11486             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_UNCOND, iemCImpl_call_rel_16, (int16_t)u16Imm);
     11495            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_16, (int16_t)u16Imm);
    1148711496        }
    1148811497
     
    1149011499        {
    1149111500            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
    11492             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_UNCOND, iemCImpl_call_rel_32, (int32_t)u32Imm);
     11501            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_32, (int32_t)u32Imm);
    1149311502        }
    1149411503
     
    1149611505        {
    1149711506            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
    11498             IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_UNCOND, iemCImpl_call_rel_64, u64Imm);
     11507            IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_64, u64Imm);
    1149911508        }
    1150011509
     
    1155511564    uint16_t u16Sel;  IEM_OPCODE_GET_NEXT_U16(&u16Sel);
    1155611565    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    11557     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     11566    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_FAR
     11567                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
    1155811568                                iemCImpl_FarJmp, u16Sel, off32Seg, pVCpu->iem.s.enmEffOpSize);
    1155911569}
     
    1164111651    IEMOP_HLP_MIN_386();
    1164211652    /** @todo testcase! */
    11643     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
     11653    IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     11654                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
    1164411655                                iemCImpl_int, X86_XCPT_DB, IEMINT_INT1);
    1164511656}
     
    1264012651                IEM_MC_ARG(uint16_t, u16Target, 0);
    1264112652                IEM_MC_FETCH_GREG_U16(u16Target, IEM_GET_MODRM_RM(pVCpu, bRm));
    12642                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_16, u16Target);
     12653                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_16, u16Target);
    1264312654                IEM_MC_END();
    1264412655                break;
     
    1264912660                IEM_MC_ARG(uint32_t, u32Target, 0);
    1265012661                IEM_MC_FETCH_GREG_U32(u32Target, IEM_GET_MODRM_RM(pVCpu, bRm));
    12651                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_32, u32Target);
     12662                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_32, u32Target);
    1265212663                IEM_MC_END();
    1265312664                break;
     
    1265812669                IEM_MC_ARG(uint64_t, u64Target, 0);
    1265912670                IEM_MC_FETCH_GREG_U64(u64Target, IEM_GET_MODRM_RM(pVCpu, bRm));
    12660                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_64, u64Target);
     12671                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_64, u64Target);
    1266112672                IEM_MC_END();
    1266212673                break;
     
    1267712688                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    1267812689                IEM_MC_FETCH_MEM_U16(u16Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    12679                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_16, u16Target);
     12690                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_16, u16Target);
    1268012691                IEM_MC_END();
    1268112692                break;
     
    1268812699                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    1268912700                IEM_MC_FETCH_MEM_U32(u32Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    12690                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_32, u32Target);
     12701                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_32, u32Target);
    1269112702                IEM_MC_END();
    1269212703                break;
     
    1269912710                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    1270012711                IEM_MC_FETCH_MEM_U64(u64Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    12701                 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_call_64, u64Target);
     12712                IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_64, u64Target);
    1270212713                IEM_MC_END();
    1270312714                break;
     
    1273512746            IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \
    1273612747            IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 2); \
    12737             IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \
     12748            IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR \
     12749                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \
    1273812750                                a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \
    1273912751            IEM_MC_END(); \
     
    1275012762            IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \
    1275112763            IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 4); \
    12752             IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \
     12764            IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR \
     12765                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \
    1275312766                                a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \
    1275412767            IEM_MC_END(); \
     
    1276612779            IEM_MC_FETCH_MEM_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \
    1276712780            IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 8); \
    12768             IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE /* no gates */, \
     12781            IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE /* no gates */, \
    1276912782                                a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \
    1277012783            IEM_MC_END(); \
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r100714 r100731  
    14141414    IEMOP_HLP_VMX_INSTR("vmlaunch", kVmxVDiag_Vmentry);
    14151415    IEMOP_HLP_DONE_DECODING();
    1416     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     1416    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     1417                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    14171418                                iemCImpl_vmlaunch);
    14181419}
     
    14341435    IEMOP_HLP_VMX_INSTR("vmresume", kVmxVDiag_Vmentry);
    14351436    IEMOP_HLP_DONE_DECODING();
    1436     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     1437    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     1438                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    14371439                                iemCImpl_vmresume);
    14381440}
     
    15761578    IEMOP_MNEMONIC(vmrun, "vmrun");
    15771579    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo check prefix effect on the SVM instructions. ASSUMING no lock for now. */
    1578     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT,
     1580    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     1581                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    15791582                                iemCImpl_vmrun);
    15801583}
     
    19641967    IEMOP_MNEMONIC(syscall, "syscall"); /** @todo 286 LOADALL   */
    19651968    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    1966     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB,
     1969    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     1970                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB,
    19671971                                iemCImpl_syscall);
    19681972}
     
    19831987    IEMOP_MNEMONIC(sysret, "sysret");  /** @todo 386 LOADALL   */
    19841988    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    1985     IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB,
     1989    IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     1990                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB,
    19861991                                iemCImpl_sysret, pVCpu->iem.s.enmEffOpSize);
    19871992}
     
    51765181    IEMOP_MNEMONIC0(FIXED, SYSENTER, sysenter, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
    51775182    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    5178     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
     5183    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     5184                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    51795185                                iemCImpl_sysenter);
    51805186}
     
    51855191    IEMOP_MNEMONIC0(FIXED, SYSEXIT, sysexit, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0);
    51865192    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    5187     IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
     5193    IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     5194                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    51885195                                iemCImpl_sysexit, pVCpu->iem.s.enmEffOpSize);
    51895196}
     
    95729579    IEMOP_HLP_MIN_386(); /* 386SL and later. */
    95739580    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    9574     IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
     9581    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR
     9582                                | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB,
    95759583                                iemCImpl_rsm);
    95769584}
  • trunk/src/VBox/VMM/VMMAll/IEMAllThreadedFunctionsBltIn.cpp

    r100694 r100731  
    126126 */
    127127#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
    128         Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges < RT_ELEMENTS((a_pTb)->aRanges)); \
     128        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
    129129        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
    130130        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
     
    174174 *
    175175 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
    176  * it is an inter-page branch.
     176 * it is an inter-page branch and also check the page offset.
    177177 *
    178178 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
    179179 */
    180 #define BODY_LOAD_TLB_FOR_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
     180#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
    181181        /* Is RIP within the current code page? */ \
    182182        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
     
    184184        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
    185185        if (off < pVCpu->iem.s.cbInstrBufTotal) \
     186        { \
    186187            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
     188            Assert(pVCpu->iem.s.pbInstrBuf); \
     189            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
     190                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
     191            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
     192            { /* we're good */ } \
     193            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
     194            { \
     195                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     196                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     197                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     198                RT_NOREF(a_cbInstr); \
     199                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
     200            } \
     201            else \
     202            { \
     203                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     204                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     205                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     206                RT_NOREF(a_cbInstr); \
     207                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
     208            } \
     209        } \
    187210        else \
    188211        { \
     
    192215            pVCpu->iem.s.offInstrNextByte = 0; \
    193216            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
     217            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
    194218            \
    195             RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
    196             if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
    197                           && pVCpu->iem.s.pbInstrBuf)) \
     219            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
     220                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
     221            uint64_t const offNew                    = uPc - pVCpu->iem.s.uInstrBufPc; \
     222            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
     223                && pVCpu->iem.s.pbInstrBuf) \
    198224            { /* likely */ } \
     225            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
     226                     && pVCpu->iem.s.pbInstrBuf) \
     227            { \
     228                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     229                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     230                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     231                RT_NOREF(a_cbInstr); \
     232                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
     233            } \
    199234            else \
    200235            { \
    201                 Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     236                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
    202237                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
    203                       pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     238                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
    204239                RT_NOREF(a_cbInstr); \
    205240                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
     
    208243    } while(0)
    209244
     245/**
     246 * Macro that implements PC check after a conditional branch.
     247 */
     248#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
     249        /* Is RIP within the current code page? */ \
     250        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
     251        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
     252        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
     253        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
     254        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
     255                                                 | pTb->aRanges[(a_idxRange)].offPhysPage; \
     256        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
     257            && off < pVCpu->iem.s.cbInstrBufTotal) \
     258        { /* we're good */ } \
     259        else \
     260        { \
     261            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
     262                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     263                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     264            RT_NOREF(a_cbInstr); \
     265            return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
     266        } \
     267    } while(0)
     268
    210269
    211270/**
     
    255314}
    256315
     316
     317/*
     318 * Post-branching checkers.
     319 */
     320
     321/**
     322 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
     323 * after conditional branching within the same page.
     324 *
     325 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
     326 */
     327IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes,
     328                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     329{
     330    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
     331    uint32_t const cbInstr  = (uint32_t)uParam0;
     332    uint32_t const idxRange = (uint32_t)uParam1;
     333    uint32_t const offRange = (uint32_t)uParam2;
     334    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     335    BODY_CHECK_CS_LIM(cbInstr);
     336    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
     337    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     338    //LogFunc(("okay\n"));
     339    return VINF_SUCCESS;
     340}
     341
     342
     343/**
     344 * Built-in function for checking the PC and checking opcodes after conditional
     345 * branching within the same page.
     346 *
     347 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
     348 */
     349IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes,
     350                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     351{
     352    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
     353    uint32_t const cbInstr  = (uint32_t)uParam0;
     354    uint32_t const idxRange = (uint32_t)uParam1;
     355    uint32_t const offRange = (uint32_t)uParam2;
     356    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     357    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
     358    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     359    //LogFunc(("okay\n"));
     360    return VINF_SUCCESS;
     361}
     362
     363
     364/**
     365 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
     366 * transitioning to a different code page.
     367 *
     368 * The code page transition can either be natural over onto the next page (with
     369 * the instruction starting at page offset zero) or by means of branching.
     370 *
     371 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
     372 */
     373IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
     374                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     375{
     376    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
     377    uint32_t const cbInstr  = (uint32_t)uParam0;
     378    uint32_t const idxRange = (uint32_t)uParam1;
     379    uint32_t const offRange = (uint32_t)uParam2;
     380    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     381    BODY_CHECK_CS_LIM(cbInstr);
     382    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
     383    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     384    //LogFunc(("okay\n"));
     385    return VINF_SUCCESS;
     386}
     387
     388
     389/**
     390 * Built-in function for loading TLB and checking opcodes when transitioning to
     391 * a different code page.
     392 *
     393 * The code page transition can either be natural over onto the next page (with
     394 * the instruction starting at page offset zero) or by means of branching.
     395 *
     396 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
     397 */
     398IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,
     399                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     400{
     401    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
     402    uint32_t const cbInstr  = (uint32_t)uParam0;
     403    uint32_t const idxRange = (uint32_t)uParam1;
     404    uint32_t const offRange = (uint32_t)uParam2;
     405    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
     406    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
     407    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     408    //LogFunc(("okay\n"));
     409    return VINF_SUCCESS;
     410}
     411
     412
     413
     414/*
     415 * Natural page crossing checkers.
     416 */
    257417
    258418/**
     
    311471/**
    312472 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
    313  * transitioning to a different code page.
    314  *
    315  * The code page transition can either be natural over onto the next page (with
    316  * the instruction starting at page offset zero) or by means of branching.
    317  *
    318  * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
    319  */
    320 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
    321                   (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
    322 {
    323     PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    324     uint32_t const cbInstr  = (uint32_t)uParam0;
    325     uint32_t const idxRange = (uint32_t)uParam1;
    326     uint32_t const offRange = (uint32_t)uParam2;
    327     BODY_CHECK_CS_LIM(cbInstr);
    328     BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);
    329     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    330     return VINF_SUCCESS;
    331 }
    332 
    333 
    334 /**
    335  * Built-in function for loading TLB and checking opcodes when transitioning to
    336  * a different code page.
    337  *
    338  * The code page transition can either be natural over onto the next page (with
    339  * the instruction starting at page offset zero) or by means of branching.
    340  *
    341  * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
    342  */
    343 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,
    344                   (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
    345 {
    346     PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    347     uint32_t const cbInstr  = (uint32_t)uParam0;
    348     uint32_t const idxRange = (uint32_t)uParam1;
    349     uint32_t const offRange = (uint32_t)uParam2;
    350     BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);
    351     BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    352     return VINF_SUCCESS;
    353 }
    354 
    355 
    356 /**
    357  * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
    358473 * advancing naturally to a different code page.
    359474 *
     
    402517}
    403518
     519
     520/**
     521 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
     522 * advancing naturally to a different code page with first instr at byte 0.
     523 *
     524 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
     525 */
     526IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb,
     527                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     528{
     529    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
     530    uint32_t const cbInstr     = (uint32_t)uParam0;
     531    uint32_t const idxRange    = (uint32_t)uParam1;
     532    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
     533    BODY_CHECK_CS_LIM(cbInstr);
     534    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
     535    Assert(pVCpu->iem.s.offCurInstrStart == 0);
     536    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
     537    return VINF_SUCCESS;
     538}
     539
     540
     541/**
     542 * Built-in function for loading TLB and checking opcodes when advancing
     543 * naturally to a different code page with first instr at byte 0.
     544 *
     545 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
     546 */
     547IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb,
     548                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
     549{
     550    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
     551    uint32_t const cbInstr     = (uint32_t)uParam0;
     552    uint32_t const idxRange    = (uint32_t)uParam1;
     553    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
     554    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
     555    Assert(pVCpu->iem.s.offCurInstrStart == 0);
     556    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
     557    return VINF_SUCCESS;
     558}
     559
  • trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py

    r100701 r100731  
    197197
    198198    ## IEM_CIMPL_F_XXX flags that we know.
     199    ## The value indicates whether it terminates the TB or not. The goal is to
     200    ## improve the recompiler so all but END_TB will be False.
    199201    kdCImplFlags = {
    200         'IEM_CIMPL_F_MODE':             True,
    201         'IEM_CIMPL_F_BRANCH_UNCOND':    False,
    202         'IEM_CIMPL_F_BRANCH_COND':      False,
    203         'IEM_CIMPL_F_BRANCH_INDIR':     True,
    204         'IEM_CIMPL_F_RFLAGS':           False,
    205         'IEM_CIMPL_F_STATUS_FLAGS':     False,
    206         'IEM_CIMPL_F_VMEXIT':           False,
    207         'IEM_CIMPL_F_FPU':              False,
    208         'IEM_CIMPL_F_REP':              False,
    209         'IEM_CIMPL_F_END_TB':           False,
    210         'IEM_CIMPL_F_XCPT':             True,
     202        'IEM_CIMPL_F_MODE':                 True,
     203        'IEM_CIMPL_F_BRANCH_DIRECT':        False,
     204        'IEM_CIMPL_F_BRANCH_INDIRECT':      False,
     205        'IEM_CIMPL_F_BRANCH_RELATIVE':      False,
     206        'IEM_CIMPL_F_BRANCH_FAR':           True,
     207        'IEM_CIMPL_F_BRANCH_CONDITIONAL':   False,
     208        'IEM_CIMPL_F_RFLAGS':               False,
     209        'IEM_CIMPL_F_STATUS_FLAGS':         False,
     210        'IEM_CIMPL_F_VMEXIT':               True,
     211        'IEM_CIMPL_F_FPU':                  False,
     212        'IEM_CIMPL_F_REP':                  True,
     213        'IEM_CIMPL_F_END_TB':               True,
     214        'IEM_CIMPL_F_XCPT':                 True,
    211215    };
    212216
     
    506510
    507511
    508     def analyzeCodeOperation(self, aoStmts):
     512    def analyzeCodeOperation(self, aoStmts, fSeenConditional = False):
    509513        """
    510514        Analyzes the code looking for clues as to additional side-effects.
     
    513517        collecting these in self.dsCImplFlags.
    514518        """
    515         fSeenConditional = False;
    516519        for oStmt in aoStmts:
    517520            # Pick up hints from CIMPL calls and deferals.
     
    526529                            self.raiseProblem('Unknown CIMPL flag value: %s' % (sFlag,));
    527530
    528             # Check for conditional so we can categorize any branches correctly.
    529             if (   oStmt.sName.startswith('IEM_MC_IF_')
    530                 or oStmt.sName == 'IEM_MC_ENDIF'):
    531                 fSeenConditional = True;
    532 
    533531            # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
    534532            elif oStmt.sName.startswith('IEM_MC_SET_RIP'):
    535533                assert not fSeenConditional;
    536                 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIR'] = True;
     534                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
    537535            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
     536                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
    538537                if fSeenConditional:
    539                     self.dsCImplFlags['IEM_CIMPL_F_BRANCH_COND'] = True;
    540                 else:
    541                     self.dsCImplFlags['IEM_CIMPL_F_BRANCH_UNCOND'] = True;
     538                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
    542539
    543540            # Process branches of conditionals recursively.
    544541            if isinstance(oStmt, iai.McStmtCond):
    545                 self.analyzeCodeOperation(oStmt.aoIfBranch);
     542                self.analyzeCodeOperation(oStmt.aoIfBranch, True);
    546543                if oStmt.aoElseBranch:
    547                     self.analyzeCodeOperation(oStmt.aoElseBranch);
     544                    self.analyzeCodeOperation(oStmt.aoElseBranch, True);
    548545
    549546        return True;
     
    914911        aoStmts.append(iai.McCppGeneric('IEM_MC2_END_EMIT_CALLS(' + sCImplFlags + ');',
    915912                                        cchIndent = cchIndent)); # For closing the scope.
     913
     914        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
     915        # indicates we should do so.
     916        asEndTbFlags      = [];
     917        asTbBranchedFlags = [];
     918        for sFlag in self.dsCImplFlags:
     919            if self.kdCImplFlags[sFlag] is True:
     920                asEndTbFlags.append(sFlag);
     921            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
     922                asTbBranchedFlags.append(sFlag);
     923        if asTbBranchedFlags:
     924            aoStmts.extend([
     925                iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
     926                                 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
     927                                 cchIndent = cchIndent), # Using the inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
     928                #iai.McCppGeneric('pVCpu->iem.s.fTbBranched = %s;'
     929                #                 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
     930                #                 cchIndent = cchIndent),
     931                #iai.McCppGeneric('pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;', cchIndent = cchIndent),
     932                #iai.McCppGeneric('pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;', cchIndent = cchIndent),
     933            ]);
     934        if asEndTbFlags:
     935            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
     936                                            cchIndent = cchIndent));
     937
    916938        return aoStmts;
    917939
     
    10961118        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
    10971119        aoDecoderStmts = [];
    1098 
    1099         # Take a very simple approach to problematic instructions for now.
    1100         if cDepth == 0:
    1101             dsCImplFlags = {};
    1102             for oVar in self.aoVariations:
    1103                 dsCImplFlags.update(oVar.dsCImplFlags);
    1104             if (   'IEM_CIMPL_F_BRANCH_UNCOND' in dsCImplFlags
    1105                 or 'IEM_CIMPL_F_BRANCH_COND'   in dsCImplFlags
    1106                 or 'IEM_CIMPL_F_BRANCH_INDIR'  in dsCImplFlags
    1107                 or 'IEM_CIMPL_F_MODE'          in dsCImplFlags
    1108                 or 'IEM_CIMPL_F_REP'           in dsCImplFlags):
    1109                 aoDecoderStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true;'));
    11101120
    11111121        for oStmt in aoStmts:
     
    12821292            '    kIemThreadedFunc_CheckCsLim,',
    12831293            '    kIemThreadedFunc_CheckCsLimAndOpcodes,',
     1294            '    kIemThreadedFunc_CheckOpcodes,',
     1295            '    kIemThreadedFunc_CheckCsLimAndPcAndOpcodes,',
     1296            '    kIemThreadedFunc_CheckPcAndOpcodes,',
    12841297            '    kIemThreadedFunc_CheckCsLimAndOpcodesAcrossPageLoadingTlb,',
     1298            '    kIemThreadedFunc_CheckOpcodesAcrossPageLoadingTlb,',
    12851299            '    kIemThreadedFunc_CheckCsLimAndOpcodesLoadingTlb,',
     1300            '    kIemThreadedFunc_CheckOpcodesLoadingTlb,',
    12861301            '    kIemThreadedFunc_CheckCsLimAndOpcodesOnNextPageLoadingTlb,',
    1287             '    kIemThreadedFunc_CheckOpcodes,',
    1288             '    kIemThreadedFunc_CheckOpcodesAcrossPageLoadingTlb,',
    1289             '    kIemThreadedFunc_CheckOpcodesLoadingTlb,',
    12901302            '    kIemThreadedFunc_CheckOpcodesOnNextPageLoadingTlb,',
     1303            '    kIemThreadedFunc_CheckCsLimAndOpcodesOnNewPageLoadingTlb,',
     1304            '    kIemThreadedFunc_CheckOpcodesOnNewPageLoadingTlb,',
    12911305        ];
    12921306        iThreadedFunction = 1;
     
    14491463                   + '    iemThreadedFunc_BltIn_CheckCsLim,\n'
    14501464                   + '    iemThreadedFunc_BltIn_CheckCsLimAndOpcodes,\n'
     1465                   + '    iemThreadedFunc_BltIn_CheckOpcodes,\n'
     1466                   + '    iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes,\n'
     1467                   + '    iemThreadedFunc_BltIn_CheckPcAndOpcodes,\n'
    14511468                   + '    iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,\n'
     1469                   + '    iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,\n'
    14521470                   + '    iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,\n'
     1471                   + '    iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,\n'
    14531472                   + '    iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,\n'
    1454                    + '    iemThreadedFunc_BltIn_CheckOpcodes,\n'
    1455                    + '    iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,\n'
    1456                    + '    iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,\n'
    14571473                   + '    iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb,\n'
     1474                   + '    iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb,\n'
     1475                   + '    iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb,\n'
    14581476                   );
    14591477        iThreadedFunction = 1;
     
    14901508                   + '    "BltIn_CheckCsLim",\n'
    14911509                   + '    "BltIn_CheckCsLimAndOpcodes",\n'
     1510                   + '    "BltIn_CheckOpcodes",\n'
     1511                   + '    "BltIn_CheckCsLimAndPcAndOpcodes",\n'
     1512                   + '    "BltIn_CheckPcAndOpcodes",\n'
    14921513                   + '    "BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb",\n'
     1514                   + '    "BltIn_CheckOpcodesAcrossPageLoadingTlb",\n'
    14931515                   + '    "BltIn_CheckCsLimAndOpcodesLoadingTlb",\n'
     1516                   + '    "BltIn_CheckOpcodesLoadingTlb",\n'
    14941517                   + '    "BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb",\n'
    1495                    + '    "BltIn_CheckOpcodes",\n'
    1496                    + '    "BltIn_CheckOpcodesAcrossPageLoadingTlb",\n'
    1497                    + '    "BltIn_CheckOpcodesLoadingTlb",\n'
    14981518                   + '    "BltIn_CheckOpcodesOnNextPageLoadingTlb",\n'
     1519                   + '    "BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb",\n'
     1520                   + '    "BltIn_CheckOpcodesOnNewPageLoadingTlb",\n'
    14991521                   );
    15001522        iThreadedFunction = 1;
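
As a quick illustration of how the reworked kdCImplFlags table above is meant to be consumed (see the IEM_MC2_END_EMIT_CALLS handling earlier in this file's diff), here is a minimal, self-contained Python sketch; the helper name partitionFlags and the example flag sets are illustrative only and not part of the changeset:

    # Flags mapped to True force the translation block (TB) to end, while the
    # IEM_CIMPL_F_BRANCH_* flags mapped to False merely mark the TB as branched.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                 True,
        'IEM_CIMPL_F_BRANCH_DIRECT':        False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':      False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':      False,
        'IEM_CIMPL_F_BRANCH_FAR':           True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':   False,
        'IEM_CIMPL_F_VMEXIT':               True,
        'IEM_CIMPL_F_END_TB':               True,
    };

    def partitionFlags(dsCImplFlags):
        """ Splits the flags seen on an instruction into end-TB and branched lists. """
        asEndTbFlags      = [sFlag for sFlag in dsCImplFlags if kdCImplFlags[sFlag] is True];
        asTbBranchedFlags = [sFlag for sFlag in dsCImplFlags
                             if kdCImplFlags[sFlag] is False and sFlag.startswith('IEM_CIMPL_F_BRANCH_')];
        return (asEndTbFlags, asTbBranchedFlags);

    # A near 'retn' (IEM_CIMPL_F_BRANCH_INDIRECT only) just marks the TB as branched:
    print(partitionFlags({ 'IEM_CIMPL_F_BRANCH_INDIRECT': True, }));
    # ... whereas a far 'retf' also ends the TB via BRANCH_FAR and MODE:
    print(partitionFlags({ 'IEM_CIMPL_F_BRANCH_INDIRECT': True, 'IEM_CIMPL_F_BRANCH_FAR': True,
                           'IEM_CIMPL_F_MODE': True, }));

In the generated code the end-TB case sets pVCpu->iem.s.fEndTb, while the branched flags are forwarded to iemThreadedSetBranched, as the diff above shows.
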
  • trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp

    r100701 r100731  
    66 *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events. [same as IEM]
    77 *      - Flow  (LogFlow) :
    8  *      - Level 2  (Log2) :
    9  *      - Level 3  (Log3) : More detailed enter/exit IEM state info. [same as IEM]
     8 *      - Level 2  (Log2) : Basic instruction execution state info. [same as IEM]
     9 *      - Level 3  (Log3) : More detailed execution state info. [same as IEM]
    1010 *      - Level 4  (Log4) : Decoding mnemonics w/ EIP. [same as IEM]
    1111 *      - Level 5  (Log5) : Decoding details. [same as IEM]
     
    112112
    113113/*********************************************************************************************************************************
    114 *   Structures and Typedefs                                                                                                      *
    115 *********************************************************************************************************************************/
    116 
    117 
    118 
    119 /*********************************************************************************************************************************
    120114*   Internal Functions                                                                                                           *
    121115*********************************************************************************************************************************/
     
    130124
    131125
     126/*
     127 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
     128 */
    132129#undef IEM_MC_CALC_RM_EFF_ADDR
    133130#ifndef IEM_WITH_SETJMP
     
    141138#endif
    142139
     140/*
     141 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
     142 */
     143#undef IEM_MC_REL_JMP_S8_AND_FINISH
     144#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
     145        Assert(pVCpu->iem.s.fTbBranched != 0); \
     146        if ((a_i8) == 0) \
     147            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
     148        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
     149    } while (0)
     150
     151#undef IEM_MC_REL_JMP_S16_AND_FINISH
     152#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
     153        Assert(pVCpu->iem.s.fTbBranched != 0); \
     154        if ((a_i16) == 0) \
     155            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
     156        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
     157    } while (0)
     158
     159#undef IEM_MC_REL_JMP_S32_AND_FINISH
     160#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
     161        Assert(pVCpu->iem.s.fTbBranched != 0); \
     162        if ((a_i32) == 0) \
     163            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
     164        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
     165    } while (0)
     166
     167
     168/*
     169 * Emit call macros.
     170 */
    143171#define IEM_MC2_BEGIN_EMIT_CALLS() \
    144172    { \
     
    169197        \
    170198        do { } while (0)
    171 
    172199#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
    173200        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
     
    224251        pCall->auParams[2] = a_uArg2; \
    225252    } while (0)
    226 
    227253#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
    228254        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
     
    572598
    573599
     600/**
     601 * Helper for indicating that we've branched.
     602 */
     603DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
     604{
     605    pVCpu->iem.s.fTbBranched          = fTbBranched;
     606    pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
     607    pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
     608}
     609
     610
    574611/*
    575612 * Include the "annotated" IEMAllInstructions*.cpp.h files.
     
    748785    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbThreadedInstr, pTb->cInstructions);
    749786    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbThreadedCalls, pTb->Thrd.cCalls);
    750     Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x\n", pTb, pTb->GCPhysPc, pTb->cbOpcodes, pTb->fFlags, idxHash));
     787    if (LogIs12Enabled())
     788    {
     789        Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x cRanges=%u cInstr=%u cCalls=%u\n",
     790               pTb, pTb->GCPhysPc, pTb->cbOpcodes, pTb->fFlags, idxHash, pTb->cRanges, pTb->cInstructions, pTb->Thrd.cCalls));
     791        for (uint8_t idxRange = 0; idxRange < pTb->cRanges; idxRange++)
     792            Log12((" range#%u: offPg=%#05x offOp=%#04x LB %#04x pg#%u=%RGp\n", idxRange, pTb->aRanges[idxRange].offPhysPage,
     793                   pTb->aRanges[idxRange].offOpcodes, pTb->aRanges[idxRange].cbOpcodes, pTb->aRanges[idxRange].idxPhysPage,
     794                   pTb->aRanges[idxRange].idxPhysPage == 0
     795                   ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK
     796                   : pTb->aGCPhysPages[pTb->aRanges[idxRange].idxPhysPage - 1]));
     797    }
    751798    RT_NOREF(pVM);
    752799}
     
    861908        pVCpu->iem.s.fEndTb                 = false;
    862909        pVCpu->iem.s.fTbCheckOpcodes        = false;
    863         pVCpu->iem.s.fTbBranched            = false;
     910        pVCpu->iem.s.fTbBranched            = IEMBRANCHED_F_NO;
    864911        pVCpu->iem.s.fTbCrossedPage         = false;
    865912    }
     
    10191066     * Case 1: We've branched (RIP changed).
    10201067     *
    1021      * Sub-case 1a: Same page, no TLB load, so fTbCrossedPage is false.
     1068     * Sub-case 1a: Same page, no TLB load (fTbCrossedPage is false).
    10221069     *         Req: 1 extra range, no extra phys.
    10231070     *
    1024      * Sub-case 1b: Different page, so TLB load necessary and fTbCrossedPage is true.
     1071     * Sub-case 1b: Different page but no page boundary crossing, so TLB load
     1072     *              necessary (fTbCrossedPage is true).
    10251073     *         Req: 1 extra range, probably 1 extra phys page entry.
    10261074     *
    1027      * Sub-case 1c: Different page, so TLB load necessary and fTbCrossedPage is true,
     1075     * Sub-case 1c: Different page, so TLB load necessary (fTbCrossedPage is true),
    10281076     *              but in addition we cross into the following page and require
    10291077     *              another TLB load.
     
    10311079     *
    10321080     * Sub-case 1d: Same page, so no initial TLB load necessary, but we cross into
    1033      *              the following page and thus fTbCrossedPage is true.
     1081     *              the following page (thus fTbCrossedPage is true).
    10341082     *         Req: 2 extra ranges, probably 1 extra phys page entry.
    10351083     *
     1084     * Note! The setting fTbCrossedPage is done by the iemOpcodeFetchBytesJmp, but
     1085     *       it may trigger "spuriously" from the CPU point of view because of
     1086     *       physical page changes that'll invalidate the physical TLB and trigger a
     1087     *       call to the function.  In theory this shouldn't be a big deal, just a bit
     1088     *       of performance loss as we'll pick the LoadingTlb variants.
     1089     *
    10361090     * Note! We do not currently optimize branching to the next instruction (sorry
    1037      *       32-bit PIC code).  We could maybe do that in the branching code that sets (or not) fTbBranched.
    1038      */
    1039     if (pVCpu->iem.s.fTbBranched)
    1040     {
    1041 AssertFailed(); /** @todo enable including branches in TBs and debug this code. */
     1091     *       32-bit PIC code).  We could maybe do that in the branching code that
     1092     *       sets (or not) fTbBranched.
     1093     */
     1094    /** @todo Optimize 'jmp .next_instr' and 'call .next_instr'. Seen the jmp
     1095     *        variant in win 3.1 code and the call variant in 32-bit linux PIC
     1096     *        code.  This'll require filtering out far jmps and calls, as they
     1097     *        load CS which should technically be considered indirect since the
     1098     *        GDT/LDT entry's base address can be modified independently from
     1099     *        the code. */
     1100    if (pVCpu->iem.s.fTbBranched != 0)
     1101    {
    10421102        if (   !pVCpu->iem.s.fTbCrossedPage       /* 1a */
    10431103            || pVCpu->iem.s.offCurInstrStart >= 0 /* 1b */ )
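
The check above keys off fTbCrossedPage and the sign of offCurInstrStart. As a standalone illustration (not taken from the changeset; the struct and names are simplified stand-ins for the IEMCPU decoder state), the sub-cases described in the comment can be told apart roughly like this:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DECODERSTATE
    {
        bool    fBranched;         /* stand-in for pVCpu->iem.s.fTbBranched != 0 */
        bool    fCrossedPage;      /* stand-in for pVCpu->iem.s.fTbCrossedPage */
        int16_t offCurInstrStart;  /* negative if the instruction started on the previous page */
    } DECODERSTATE;

    static const char *ClassifyPostBranch(DECODERSTATE const *pState)
    {
        if (!pState->fBranched)
            return "no branch taken";
        if (!pState->fCrossedPage)
            return "1a: same page, no TLB load";
        if (pState->offCurInstrStart >= 0)
            return "1b: different page, TLB load, instruction contained in the new page";
        /* Whether this is 1c or 1d depends on where the branch target page sat
           relative to the previous instruction buffer, which isn't modelled here. */
        return "1c/1d: instruction crosses into the following page";
    }

    int main(void)
    {
        DECODERSTATE const aTests[] =
        {
            { true,  false,  7 },   /* intra-page branch                            */
            { true,  true,   2 },   /* branch onto another page                     */
            { true,  true,  -3 },   /* target instruction straddles a page boundary */
        };
        for (unsigned i = 0; i < sizeof(aTests) / sizeof(aTests[0]); i++)
            printf("test #%u: %s\n", i, ClassifyPostBranch(&aTests[i]));
        return 0;
    }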
     
    10471107            Assert(pVCpu->iem.s.offCurInstrStart + cbInstr <= GUEST_PAGE_SIZE);
    10481108
    1049             /* Check that we've got a free range. */
    1050             idxRange += 1;
    1051             if (idxRange < RT_ELEMENTS(pTb->aRanges))
    1052             { /* likely */ }
    1053             else
    1054                 return false;
    1055             pCall->idxRange    = idxRange;
    1056             pCall->auParams[1] = idxRange;
    1057             pCall->auParams[2] = 0;
    1058 
    1059             /* Check that we've got a free page slot. */
    1060             AssertCompile(RT_ELEMENTS(pTb->aGCPhysPages) == 2);
    1061             RTGCPHYS const GCPhysNew = pVCpu->iem.s.GCPhysInstrBuf & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    1062             if ((pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysNew)
    1063                 pTb->aRanges[idxRange].idxPhysPage = 0;
    1064             else if (   pTb->aGCPhysPages[0] == NIL_RTGCPHYS
    1065                      || pTb->aGCPhysPages[0] == GCPhysNew)
     1109            if (!(pVCpu->iem.s.fTbBranched & IEMBRANCHED_F_ZERO))
    10661110            {
    1067                 pTb->aGCPhysPages[0] = GCPhysNew;
    1068                 pTb->aRanges[idxRange].idxPhysPage = 1;
    1069             }
    1070             else if (   pTb->aGCPhysPages[1] == NIL_RTGCPHYS
    1071                      || pTb->aGCPhysPages[1] == GCPhysNew)
    1072             {
    1073                 pTb->aGCPhysPages[1] = GCPhysNew;
    1074                 pTb->aRanges[idxRange].idxPhysPage = 2;
     1111                /* Check that we've got a free range. */
     1112                idxRange += 1;
     1113                if (idxRange < RT_ELEMENTS(pTb->aRanges))
     1114                { /* likely */ }
     1115                else
     1116                {
     1117                    Log8(("%04x:%08RX64: out of ranges after branch\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     1118                    return false;
     1119                }
     1120                pCall->idxRange    = idxRange;
     1121                pCall->auParams[1] = idxRange;
     1122                pCall->auParams[2] = 0;
     1123
     1124                /* Check that we've got a free page slot. */
     1125                AssertCompile(RT_ELEMENTS(pTb->aGCPhysPages) == 2);
     1126                RTGCPHYS const GCPhysNew = pVCpu->iem.s.GCPhysInstrBuf & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
     1127                if ((pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysNew)
     1128                    pTb->aRanges[idxRange].idxPhysPage = 0;
     1129                else if (   pTb->aGCPhysPages[0] == NIL_RTGCPHYS
     1130                         || pTb->aGCPhysPages[0] == GCPhysNew)
     1131                {
     1132                    pTb->aGCPhysPages[0] = GCPhysNew;
     1133                    pTb->aRanges[idxRange].idxPhysPage = 1;
     1134                }
     1135                else if (   pTb->aGCPhysPages[1] == NIL_RTGCPHYS
     1136                         || pTb->aGCPhysPages[1] == GCPhysNew)
     1137                {
     1138                    pTb->aGCPhysPages[1] = GCPhysNew;
     1139                    pTb->aRanges[idxRange].idxPhysPage = 2;
     1140                }
     1141                else
     1142                {
      1143                    Log8(("%04x:%08RX64: out of aGCPhysPages entries after branch\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     1144                    return false;
     1145                }
     1146
     1147                /* Finish setting up the new range. */
     1148                pTb->aRanges[idxRange].offPhysPage = pVCpu->iem.s.offCurInstrStart;
     1149                pTb->aRanges[idxRange].offOpcodes  = offOpcode;
     1150                pTb->aRanges[idxRange].cbOpcodes   = cbInstr;
     1151                pTb->aRanges[idxRange].u2Unused    = 0;
     1152                pTb->cRanges++;
    10751153            }
    10761154            else
    1077                 return false;
    1078 
    1079             /* Finish setting up the new range. */
    1080             pTb->aRanges[idxRange].offPhysPage = pVCpu->iem.s.offCurInstrStart;
    1081             pTb->aRanges[idxRange].offOpcodes  = offOpcode;
    1082             pTb->aRanges[idxRange].cbOpcodes   = cbInstr;
    1083             pTb->aRanges[idxRange].u2Unused    = 0;
    1084             pTb->cRanges++;
     1155            {
     1156                Log8(("%04x:%08RX64: zero byte jump\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     1157                pTb->aRanges[idxRange].cbOpcodes += cbInstr;
     1158            }
    10851159
     10861160            /* Determine which function we need to load & check.
     
    10881162                     fTbCrossedPage to avoid unnecessary TLB work for intra
    10891163                     page branching */
    1090             if (pVCpu->iem.s.fTbCrossedPage)
     1164            if (   (pVCpu->iem.s.fTbBranched & (IEMBRANCHED_F_INDIRECT | IEMBRANCHED_F_FAR)) /* Far is basically indirect. */
     1165                || pVCpu->iem.s.fTbCrossedPage)
    10911166                pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS
    10921167                                   ? kIemThreadedFunc_CheckCsLimAndOpcodesLoadingTlb
    10931168                                   : kIemThreadedFunc_CheckOpcodesLoadingTlb;
     1169            else if (pVCpu->iem.s.fTbBranched & (IEMBRANCHED_F_CONDITIONAL | /* paranoia: */ IEMBRANCHED_F_DIRECT))
     1170                pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS
     1171                                   ? kIemThreadedFunc_CheckCsLimAndPcAndOpcodes
     1172                                   : kIemThreadedFunc_CheckPcAndOpcodes;
    10941173            else
     1174            {
     1175                Assert(pVCpu->iem.s.fTbBranched & IEMBRANCHED_F_RELATIVE);
    10951176                pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS
    10961177                                   ? kIemThreadedFunc_CheckCsLimAndOpcodes
    10971178                                   : kIemThreadedFunc_CheckOpcodes;
     1179            }
    10981180        }
    10991181        else
     
    11261208
    11271209#else
     1210            Log8(("%04x:%08RX64: complicated post-branch condition, ending TB.\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    11281211            return false;
    11291212#endif
     
    11501233        { /* likely */ }
    11511234        else
     1235        {
     1236            Log8(("%04x:%08RX64: out of ranges while crossing page\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    11521237            return false;
     1238        }
    11531239
    11541240        /* Check that we've got a free page slot. */
     
    11701256        }
    11711257        else
     1258        {
      1259            Log8(("%04x:%08RX64: out of aGCPhysPages entries while crossing page\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    11721260            return false;
     1261        }
    11731262
    11741263        if (((pTb->aRanges[idxRange - 1].offPhysPage + pTb->aRanges[idxRange - 1].cbOpcodes) & GUEST_PAGE_OFFSET_MASK) == 0)
     
     11881277            /* Determine which function we need to load & check. */
    11891278            pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS
    1190                                ? kIemThreadedFunc_CheckCsLimAndOpcodesLoadingTlb
    1191                                : kIemThreadedFunc_CheckOpcodesLoadingTlb;
     1279                               ? kIemThreadedFunc_CheckCsLimAndOpcodesOnNewPageLoadingTlb
     1280                               : kIemThreadedFunc_CheckOpcodesOnNewPageLoadingTlb;
    11921281        }
    11931282        else
     
    12461335     * Clear state.
    12471336     */
    1248     pVCpu->iem.s.fTbBranched     = false;
     1337    pVCpu->iem.s.fTbBranched     = IEMBRANCHED_F_NO;
    12491338    pVCpu->iem.s.fTbCrossedPage  = false;
    12501339    pVCpu->iem.s.fTbCheckOpcodes = false;
     
    13761465{
    13771466    /* Check the opcodes in the first page before starting execution. */
    1378     uint32_t const cbLeadOpcodes = RT_MIN(pTb->cbOpcodes, pVCpu->iem.s.cbInstrBufTotal - pVCpu->iem.s.offInstrNextByte);
    1379     if (memcmp(pTb->pabOpcodes, &pVCpu->iem.s.pbInstrBuf[pVCpu->iem.s.offInstrNextByte], cbLeadOpcodes) == 0)
    1380         Assert(   pTb->cbOpcodes == cbLeadOpcodes
    1381                || cbLeadOpcodes == (GUEST_PAGE_SIZE - (pTb->GCPhysPc & GUEST_PAGE_OFFSET_MASK)));
     1467    Assert(!(pVCpu->iem.s.GCPhysInstrBuf & (RTGCPHYS)GUEST_PAGE_OFFSET_MASK));
     1468    Assert(pTb->aRanges[0].cbOpcodes <= pVCpu->iem.s.cbInstrBufTotal - pVCpu->iem.s.offInstrNextByte);
     1469    if (memcmp(pTb->pabOpcodes, &pVCpu->iem.s.pbInstrBuf[pTb->aRanges[0].offPhysPage], pTb->aRanges[0].cbOpcodes) == 0)
     1470    { /* likely */ }
    13821471    else
    13831472    {
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r100695 r100731  
    8181 * Linux, but it should be quite a bit faster for normal code.
    8282 */
    83 #if (defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
     83#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
    8484 || defined(DOXYGEN_RUNNING)
    8585# define IEM_WITH_THROW_CATCH
     
    843843typedef IEMTB const *PCIEMTB;
    844844
     845/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
     846 *
      847 * These flags parallel IEM_CIMPL_F_BRANCH_XXX.
     848 *
     849 * @{ */
     850/** Value if no branching happened recently. */
     851#define IEMBRANCHED_F_NO            UINT8_C(0x00)
     852/** Flag set if direct branch, clear if absolute or indirect. */
     853#define IEMBRANCHED_F_DIRECT        UINT8_C(0x01)
     854/** Flag set if indirect branch, clear if direct or relative. */
     855#define IEMBRANCHED_F_INDIRECT      UINT8_C(0x02)
     856/** Flag set if relative branch, clear if absolute or indirect. */
     857#define IEMBRANCHED_F_RELATIVE      UINT8_C(0x04)
     858/** Flag set if conditional branch, clear if unconditional. */
     859#define IEMBRANCHED_F_CONDITIONAL   UINT8_C(0x08)
     860/** Flag set if it's a far branch. */
     861#define IEMBRANCHED_F_FAR           UINT8_C(0x10)
     862/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */
     863#define IEMBRANCHED_F_ZERO          UINT8_C(0x20)
     864/** @} */
     865
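
For illustration, here is a small standalone program using the flag values copied from the definitions above; the per-instruction pairings are assumptions for the sake of the example, not taken from the recompiler itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Flag values as introduced in IEMInternal.h above. */
    #define IEMBRANCHED_F_NO            UINT8_C(0x00)
    #define IEMBRANCHED_F_DIRECT        UINT8_C(0x01)
    #define IEMBRANCHED_F_INDIRECT      UINT8_C(0x02)
    #define IEMBRANCHED_F_RELATIVE      UINT8_C(0x04)
    #define IEMBRANCHED_F_CONDITIONAL   UINT8_C(0x08)
    #define IEMBRANCHED_F_FAR           UINT8_C(0x10)
    #define IEMBRANCHED_F_ZERO          UINT8_C(0x20)

    int main(void)
    {
        /* Plausible combinations for a few common branch instructions (assumed). */
        struct { const char *pszInstr; uint8_t fBranched; } const aExamples[] =
        {
            { "jne short .target", IEMBRANCHED_F_RELATIVE | IEMBRANCHED_F_CONDITIONAL },
            { "jmp dword [eax]",   IEMBRANCHED_F_INDIRECT },
            { "retf",              IEMBRANCHED_F_INDIRECT | IEMBRANCHED_F_FAR },
            { "jmp short $+2",     IEMBRANCHED_F_RELATIVE | IEMBRANCHED_F_ZERO },
        };
        for (unsigned i = 0; i < sizeof(aExamples) / sizeof(aExamples[0]); i++)
            printf("%-20s -> fTbBranched=%#04x\n", aExamples[i].pszInstr, (unsigned)aExamples[i].fBranched);
        return 0;
    }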
    845866
    846867/**
     
    11391160     * This is set by a previous instruction if it modified memory or similar.  */
    11401161    bool                    fTbCheckOpcodes;
    1141     /** Whether we just branched and need to start a new opcode range and emit code
    1142      * to do a TLB load and check them again. */
    1143     bool                    fTbBranched;
     1162    /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
     1163    uint8_t                 fTbBranched;
    11441164    /** Set when GCPhysInstrBuf is updated because of a page crossing. */
    11451165    bool                    fTbCrossedPage;
     
     11481168    /** Space reserved for recompiler data / alignment. */
    11491169    bool                    afRecompilerStuff1[4];
     1170    /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set.   */
     1171    RTGCPHYS                GCPhysInstrBufPrev;
     1172    /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction.
      1173     * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determine
     1174     * whether a branch instruction jumps to a new page or stays within the
     1175     * current one. */
     1176    RTGCPHYS                GCPhysTbBranchSrcBuf;
     1177    /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction.  */
     1178    uint64_t                GCVirtTbBranchSrcBuf;
     1179    /* Alignment. */
     1180    uint64_t                au64RecompilerStuff2[5];
    11501181    /** Threaded TB statistics: Number of instructions per TB. */
    11511182    STAMPROFILE             StatTbThreadedInstr;
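
The doc comment says the new fields are consulted together with fTbBranched to decide whether a branch left the current page. A minimal standalone sketch of such a same-page test follows, with simplified names and a hard-coded 4KiB page mask; the actual recompiler logic may differ:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_GUEST_PAGE_OFFSET_MASK  UINT64_C(0xfff)   /* stand-in for GUEST_PAGE_OFFSET_MASK */

    /* Returns true if the branch target lies on the same physical page as the
       instruction buffer the branch was decoded from.  GCPhysSrcBuf plays the
       role of GCPhysTbBranchSrcBuf, GCPhysNewBuf that of the post-branch
       GCPhysInstrBuf.  Illustrative only. */
    static bool IsSamePhysPage(uint64_t GCPhysSrcBuf, uint64_t GCPhysNewBuf)
    {
        return ((GCPhysSrcBuf ^ GCPhysNewBuf) & ~MY_GUEST_PAGE_OFFSET_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", IsSamePhysPage(UINT64_C(0x100234), UINT64_C(0x100890))); /* 1: same 4K page   */
        printf("%d\n", IsSamePhysPage(UINT64_C(0x100234), UINT64_C(0x101010))); /* 0: different page */
        return 0;
    }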
     
    43864417
    43874418/**
    4388  * Macro for calling iemCImplRaiseInvalidOpcode().
    4389  *
    4390  * This enables us to add/remove arguments and force different levels of
    4391  * inlining as we wish.
     4419 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
     4420 *
     4421 * This is for things that will _always_ decode to an \#UD, taking the
     4422 * recompiler into consideration and everything.
    43924423 *
    43934424 * @return  Strict VBox status code.
    43944425 */
    43954426#define IEMOP_RAISE_INVALID_OPCODE_RET()    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)
     4427
     4428/**
     4429 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
     4430 *
     4431 * Using this macro means you've got _buggy_ _code_ and are doing things that
      4432 * belong exclusively in IEMAllCImpl.cpp during decoding.
     4433 *
     4434 * @return  Strict VBox status code.
     4435 * @see     IEMOP_RAISE_INVALID_OPCODE_RET
     4436 */
     4437#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET()   IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)
     4438
    43964439/** @} */
    43974440
     
    48994942
    49004943void            iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb);
     4944
    49014945IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode,
    49024946                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    49034947IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim,
    49044948                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4949
    49054950IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodes,
    4906                     (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    4907 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,
    4908                     (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    4909 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
    4910                     (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    4911 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,
    49124951                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    49134952IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodes,
    49144953                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    4915 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,
     4954
     4955/* Branching: */
     4956IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes,
     4957                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4958IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes,
     4959                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4960
     4961IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
    49164962                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    49174963IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,
    49184964                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4965
     4966/* Natural page crossing: */
     4967IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,
     4968                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4969IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,
     4970                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4971
     4972IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,
     4973                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    49194974IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb,
    49204975                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
    49214976
     4977IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb,
     4978                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4979IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb,
     4980                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
     4981
     4982
    49224983
    49234984extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
  • trunk/src/VBox/VMM/include/IEMMc.h

    r100701 r100731  
    12401240 *
    12411241 * @{ */
     1242/** Flag set if direct branch, clear if absolute or indirect. */
     1243#define IEM_CIMPL_F_BRANCH_DIRECT        RT_BIT_32(0)
     1244/** Flag set if indirect branch, clear if direct or relative.
     1245 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
     1246 * as well as for return instructions (RET, IRET, RETF). */
     1247#define IEM_CIMPL_F_BRANCH_INDIRECT      RT_BIT_32(1)
     1248/** Flag set if relative branch, clear if absolute or indirect. */
     1249#define IEM_CIMPL_F_BRANCH_RELATIVE      RT_BIT_32(2)
     1250/** Flag set if conditional branch, clear if unconditional. */
     1251#define IEM_CIMPL_F_BRANCH_CONDITIONAL   RT_BIT_32(3)
     1252/** Flag set if it's a far branch (changes CS). */
     1253#define IEM_CIMPL_F_BRANCH_FAR           RT_BIT_32(4)
     1254/** Convenience: Testing any kind of branch. */
     1255#define IEM_CIMPL_F_BRANCH_ANY          (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
     1256
    12421257/** Execution flags may change (IEMCPU::fExec). */
    1243 #define IEM_CIMPL_F_MODE            RT_BIT_32(0)
    1244 /** Unconditional direct branches (changes RIP, maybe CS). */
    1245 #define IEM_CIMPL_F_BRANCH_UNCOND   RT_BIT_32(1)
    1246 /** Conditional direct branch (may change RIP, maybe CS). */
    1247 #define IEM_CIMPL_F_BRANCH_COND     RT_BIT_32(2)
    1248 /** Indirect unconditional branch (changes RIP, maybe CS).
    1249  *
    1250  * This is used for all system control transfers (SYSCALL, SYSRET, INT, ++) as
    1251  * well as for return instructions (RET, IRET, RETF).
    1252  *
    1253  * Since the INTO instruction is currently the only indirect branch instruction
    1254  * that is conditional (depends on the overflow flag), that instruction will
    1255  * have both IEM_CIMPL_F_BRANCH_INDIR and IEM_CIMPL_F_BRANCH_COND set.  All
    1256  * other branch instructions will have exactly one of the branch flags set. */
    1257 #define IEM_CIMPL_F_BRANCH_INDIR    RT_BIT_32(3)
     1258#define IEM_CIMPL_F_MODE                RT_BIT_32(5)
    12581259/** May change significant portions of RFLAGS. */
    1259 #define IEM_CIMPL_F_RFLAGS          RT_BIT_32(4)
     1260#define IEM_CIMPL_F_RFLAGS              RT_BIT_32(6)
    12601261/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS . */
    1261 #define IEM_CIMPL_F_STATUS_FLAGS    RT_BIT_32(5)
     1262#define IEM_CIMPL_F_STATUS_FLAGS        RT_BIT_32(7)
    12621263/** May trigger a VM exit. */
    1263 #define IEM_CIMPL_F_VMEXIT          RT_BIT_32(6)
     1264#define IEM_CIMPL_F_VMEXIT              RT_BIT_32(8)
    12641265/** May modify FPU state. */
    1265 #define IEM_CIMPL_F_FPU             RT_BIT_32(7)
     1266#define IEM_CIMPL_F_FPU                 RT_BIT_32(9)
    12661267/** REP prefixed instruction which may yield before updating PC. */
    1267 #define IEM_CIMPL_F_REP             RT_BIT_32(8)
     1268#define IEM_CIMPL_F_REP                 RT_BIT_32(10)
    12681269/** Force end of TB after the instruction.    */
    1269 #define IEM_CIMPL_F_END_TB          RT_BIT_32(9)
     1270#define IEM_CIMPL_F_END_TB              RT_BIT_32(11)
    12701271/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
    1271 #define IEM_CIMPL_F_XCPT            (IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
    1272 /** Convenience: Testing any kind of branch. */
    1273 #define IEM_CIMPL_F_BRANCH_ANY      (IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_BRANCH_COND | IEM_CIMPL_F_BRANCH_INDIR)
     1272#define IEM_CIMPL_F_XCPT \
     1273    (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
    12741274/** @} */
    12751275
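
The new IEM_CIMPL_F_BRANCH_XXX bits occupy the same low five bit positions as the IEMBRANCHED_F_XXX flags added to IEMInternal.h earlier in this changeset. Below is a standalone sketch verifying that layout; whether the recompiler actually converts between the two families by plain masking is an assumption of the sketch:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* IEM_CIMPL_F_BRANCH_XXX bit positions as defined in IEMMc.h above. */
    #define MY_CIMPL_F_BRANCH_DIRECT        (UINT32_C(1) << 0)
    #define MY_CIMPL_F_BRANCH_INDIRECT      (UINT32_C(1) << 1)
    #define MY_CIMPL_F_BRANCH_RELATIVE      (UINT32_C(1) << 2)
    #define MY_CIMPL_F_BRANCH_CONDITIONAL   (UINT32_C(1) << 3)
    #define MY_CIMPL_F_BRANCH_FAR           (UINT32_C(1) << 4)

    /* IEMBRANCHED_F_XXX values as defined in IEMInternal.h above. */
    #define MY_BRANCHED_F_DIRECT            UINT8_C(0x01)
    #define MY_BRANCHED_F_INDIRECT          UINT8_C(0x02)
    #define MY_BRANCHED_F_RELATIVE          UINT8_C(0x04)
    #define MY_BRANCHED_F_CONDITIONAL       UINT8_C(0x08)
    #define MY_BRANCHED_F_FAR               UINT8_C(0x10)

    int main(void)
    {
        /* The two families share the low five bits. */
        assert(MY_CIMPL_F_BRANCH_DIRECT      == MY_BRANCHED_F_DIRECT);
        assert(MY_CIMPL_F_BRANCH_INDIRECT    == MY_BRANCHED_F_INDIRECT);
        assert(MY_CIMPL_F_BRANCH_RELATIVE    == MY_BRANCHED_F_RELATIVE);
        assert(MY_CIMPL_F_BRANCH_CONDITIONAL == MY_BRANCHED_F_CONDITIONAL);
        assert(MY_CIMPL_F_BRANCH_FAR         == MY_BRANCHED_F_FAR);

        /* Carrying the cImpl branch info over to a fTbBranched-style value by masking. */
        uint32_t const fCImpl      = MY_CIMPL_F_BRANCH_INDIRECT | MY_CIMPL_F_BRANCH_FAR; /* e.g. retf */
        uint8_t  const fTbBranched = (uint8_t)(fCImpl & 0x1f);
        printf("fCImpl=%#x -> fTbBranched=%#x\n", (unsigned)fCImpl, (unsigned)fTbBranched);
        return 0;
    }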
  • trunk/src/VBox/VMM/include/IEMOpHlp.h

    r100714 r100731  
    296296#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    297297/** This instruction raises an \#UD in real and V8086 mode or when not using a
    298  *  64-bit code segment when in long mode (applicable to all VMX instructions
    299  *  except VMCALL).
    300  */
    301 #define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
     298 * 64-bit code segment when in long mode (applicable to all VMX instructions
     299 * except VMCALL).
     300 *
     301 * @todo r=bird: This is not recompiler friendly. The scenario with
     302 *       16-bit/32-bit code running in long mode doesn't fit at all.
     303 */
     304# define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
    302305    do \
    303306    { \
     
    318321                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
    319322                Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
    320                 IEMOP_RAISE_INVALID_OPCODE_RET(); \
     323                IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \
    321324            } \
    322325        } \
     
    340343            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
    341344            Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
    342             IEMOP_RAISE_INVALID_OPCODE_RET(); \
     345            IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \
    343346        } \
    344347    } while (0)