Timestamp: Aug 14, 2024 8:47:13 AM (7 months ago)
svn:sync-xref-src-repo-rev: 164358
Location: trunk/src/VBox/VMM
Files: 5 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r105465 -> r105664:

+/**
+ * Worker for 'VGATHERcxx' / 'VPGATHERxx' masked loads.
+ *
+ * @param   u32PackedArgs   Arguments packed to the tune of IEMGATHERARGS.
+ * @param   u32Disp         The address displacement for the indices.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vpgather_worker_xx, uint32_t, u32PackedArgs, uint32_t, u32Disp)
+{
+    IEMGATHERARGS const PackedArgs = { u32PackedArgs };
+    int32_t       const offDisp    = (int32_t)u32Disp;
+
+    if (   PackedArgs.s.iYRegDst == PackedArgs.s.iYRegIdc
+        || PackedArgs.s.iYRegIdc == PackedArgs.s.iYRegMsk
+        || PackedArgs.s.iYRegDst == PackedArgs.s.iYRegMsk)
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    Assert(PackedArgs.s.enmEffOpSize <= IEMMODE_64BIT);
+    Assert(PackedArgs.s.enmEffAddrMode <= IEMMODE_64BIT);
+
+    uint32_t const cbMaxWidth = PackedArgs.s.fVex256 ? 32 : 16; /* Width of widest XMM / YMM register we will use: 32 or 16 */
+    uint32_t const cbIdxWidth = PackedArgs.s.fIdxQword ? 8 : 4; /* Width of one index: 4-byte dword or 8-byte qword */
+    uint32_t const cbValWidth = PackedArgs.s.fValQword ? 8 : 4; /* Width of one value: 4-byte dword or 8-byte qword */
+    uint32_t const cMasks     = cbMaxWidth / cbValWidth;        /* Count of masks: 8 or 4 or 2 */
+    uint32_t const cIndices   = cbMaxWidth / cbIdxWidth;        /* Count of indices: 8 or 4 or 2 */
+    uint32_t const cValues    = RT_MIN(cMasks, cIndices);       /* Count of values to gather: 8 or 4 or 2 */
+    Assert(cValues == 2 || cValues == 4 || cValues == 8);
+    uint32_t const cbDstWidth = cValues * cbValWidth;           /* Width of the destination & mask XMM / YMM registers: 32 or 16 or 8 */
+    Assert(cbDstWidth == 8 || cbDstWidth == 16 || cbDstWidth == 32);
+
+    /*
+     * Get the base pointer.
+     */
+    uint64_t u64Base = iemGRegFetchU64(pVCpu, PackedArgs.s.iGRegBase);
+    if (PackedArgs.s.enmEffAddrMode != IEMMODE_64BIT)
+        u64Base &= (PackedArgs.s.enmEffAddrMode == IEMMODE_16BIT ? UINT16_MAX : UINT32_MAX);
+
+    PRTUINT128U const apuDst[2] =
+    {
+        &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegDst].uXmm,
+        &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegDst].uXmm
+    };
+    PCRTUINT128U const apuIdc[2] =
+    {
+        &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegIdc].uXmm,
+        &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegIdc].uXmm
+    };
+    PRTUINT128U const apuMsk[2] =
+    {
+        &pVCpu->cpum.GstCtx.XState.x87.aXMM[PackedArgs.s.iYRegMsk].uXmm,
+        &pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[PackedArgs.s.iYRegMsk].uXmm
+    };
+
+    /*
+     * Convert the masks to all-0s or all-1s, writing back to the mask
+     * register so it will have the correct value if subsequent memory
+     * accesses fault.  Note that cMasks can be larger than cValues, in
+     * the Qword-index, Dword-value instructions 'vgatherqps' and
+     * 'vpgatherqd'.  Updating as many masks as *would* have been used
+     * had the destination register been wide enough matches the
+     * observed behavior of a Core i7-10700.
+     */
+    if (!PackedArgs.s.fValQword)
+        for (uint32_t i = 0; i < cMasks; i++)
+            apuMsk[(i >> 2) & 1]->ai32[i & 3] >>= 31; /* Use arithmetic shift right (SAR/ASR) */
+    else
+        for (uint32_t i = 0; i < cMasks; i++)
+            apuMsk[(i >> 1) & 1]->ai64[i & 1] >>= 63; /* Use arithmetic shift right (SAR/ASR) */
+
+    /*
+     * Zero upper bits of mask if VEX128.
+     */
+    if (!PackedArgs.s.fVex256)
+    {
+        apuMsk[1]->au64[0] = 0;
+        apuMsk[1]->au64[1] = 0;
+    }
+
+    /*
+     * Gather the individual values, as masked.
+     */
+    for (uint32_t i = 0; i < cValues; i++)
+    {
+        /*
+         * Consult the mask determined above.
+         */
+        if (  !PackedArgs.s.fValQword
+            ? apuMsk[(i >> 2) & 1]->au32[i & 3] != 0
+            : apuMsk[(i >> 1) & 1]->au64[i & 1] != 0)
+        {
+            /*
+             * Get the index, scale it, add scaled index + offset to the base pointer.
+             */
+            int64_t offIndex;
+            if (!PackedArgs.s.fIdxQword)
+                offIndex = apuIdc[(i >> 2) & 1]->ai32[i & 3];
+            else
+                offIndex = apuIdc[(i >> 1) & 1]->ai64[i & 1];
+            offIndex <<= PackedArgs.s.iScale;
+            offIndex  += offDisp;
+
+            uint64_t u64Addr = u64Base + offIndex;
+            if (PackedArgs.s.enmEffAddrMode != IEMMODE_64BIT)
+                u64Addr &= UINT32_MAX;
+
+            /*
+             * Gather it -- fetch this gather-item from guest memory.
+             */
+            VBOXSTRICTRC rcStrict;
+            if (!PackedArgs.s.fValQword)
+                rcStrict = iemMemFetchDataU32NoAc(pVCpu, &apuDst[(i >> 2) & 1]->au32[i & 3], PackedArgs.s.iEffSeg, u64Addr);
+            else
+                rcStrict = iemMemFetchDataU64NoAc(pVCpu, &apuDst[(i >> 1) & 1]->au64[i & 1], PackedArgs.s.iEffSeg, u64Addr);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+
+            /*
+             * Now that we *didn't* fault, write all-0s to that part of the mask register.
+             */
+            if (!PackedArgs.s.fValQword)
+                apuMsk[(i >> 2) & 1]->au32[i & 3] = 0;
+            else
+                apuMsk[(i >> 1) & 1]->au64[i & 1] = 0;
+            /** @todo How are data breakpoints handled? The Intel docs kind of hint
+             *        they may be raised here... */
+        }
+    }
+
+    /*
+     * Zero upper bits of destination and mask.
+     */
+    if (cbDstWidth != 32)
+    {
+        apuDst[1]->au64[0] = 0;
+        apuDst[1]->au64[1] = 0;
+        apuMsk[1]->au64[0] = 0;
+        apuMsk[1]->au64[1] = 0;
+        if (cbDstWidth == 8)
+        {
+            apuDst[0]->au64[1] = 0;
+            apuMsk[0]->au64[1] = 0;
+        }
+    }
+
+    return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+}
+
 /** @} */
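The one subtle trick in the worker above is the mask normalisation: each mask element's most significant bit is smeared across the whole element with an arithmetic right shift, so later code can test the element for plain non-zero. A minimal standalone sketch of that trick (plain C, independent of the IEM types; it assumes `>>` on a negative signed integer is an arithmetic shift, which is implementation-defined in ISO C but holds for the compilers VirtualBox targets):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Element values as a guest might leave them in the mask register. */
    int32_t ai32Mask[4] = { INT32_MIN /* MSB set */, 0x7fffffff, -1, 42 };

    /* Smear the sign bit: MSB set -> 0xffffffff, MSB clear -> 0x00000000. */
    for (unsigned i = 0; i < 4; i++)
        ai32Mask[i] >>= 31;

    for (unsigned i = 0; i < 4; i++)
        printf("mask[%u] = 0x%08x\n", i, (uint32_t)ai32Mask[i]);
    return 0; /* prints 0xffffffff, 0x00000000, 0xffffffff, 0x00000000 */
}

Writing the normalised value back before any memory access matters because a gather that faults halfway through must leave the mask register reflecting which elements are still outstanding.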
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r105652 -> r105664:

     'Mx':       ( 'IDX_UseModRM', 'rm', '%Mx',  'Mx',  'MEM', ),
     'Mx_WO':    ( 'IDX_UseModRM', 'rm', '%Mx',  'Mx',  'MEM', ),
+    'MVx':      ( 'IDX_UseModRM', 'rm', '%MVx', 'MVx', 'MEM', ), ##< VSIB only. 'V' is '*' in AMD manuals.
+    'MVx_RO':   ( 'IDX_UseModRM', 'rm', '%MVx', 'MVx', 'MEM', ), ##< VSIB only.
+    'MVx_WO':   ( 'IDX_UseModRM', 'rm', '%MVx', 'MVx', 'MEM', ), ##< VSIB only.
     'M_RO':     ( 'IDX_UseModRM', 'rm', '%M',   'M',   'MEM', ),
     'M_RW':     ( 'IDX_UseModRM', 'rm', '%M',   'M',   'MEM', ),
trunk/src/VBox/VMM/VMMAll/IEMAllInstVexMap2.cpp.h
r105445 -> r105664:

 FNIEMOP_DEF(iemOp_vpsrlvd_q_Vx_Hx_Wx)
 {
-    IEMOP_MNEMONIC3(VEX_RVM, VPSRLVD, vpsrlvd, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
-
     if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
     {
+        IEMOP_MNEMONIC3(VEX_RVM, VPSRLVQ, vpsrlvq, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
         IEMOPMEDIAOPTF3_INIT_VARS(vpsrlvq);
         return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_Opt, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
…
     else
     {
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RVM, VPSRLVD, vpsrlvd, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
         IEMOPMEDIAOPTF3_INIT_VARS(vpsrlvd);
         return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_Opt, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
…
 FNIEMOP_DEF(iemOp_vpsllvd_q_Vx_Hx_Wx)
 {
-    IEMOP_MNEMONIC3(VEX_RVM, VPSLLVD, vpsllvd, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
-
     if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
     {
+        IEMOP_MNEMONIC3(VEX_RVM, VPSLLVQ, vpsllvq, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
         IEMOPMEDIAOPTF3_INIT_VARS(vpsllvq);
         return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_Opt, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
…
     else
     {
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RVM, VPSLLVD, vpsllvd, Vx, Hx, Wx, DISOPTYPE_HARMLESS, 0);
         IEMOPMEDIAOPTF3_INIT_VARS(vpsllvd);
         return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_Opt, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
…
 /* Opcode VEX.66.0F38 0x8d - invalid. */
-/** Opcode VEX.66.0F38 0x8e. */
…
 /* Opcode VEX.66.0F38 0x8f - invalid. */

+
+/**
+ * Common worker for xxgatherxx AVX2 instructions
+ */
+FNIEMOP_DEF_1(iemOpCommonAvx2Gather_Vx_Hx_Wx, bool, fIdxQword)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+        IEMOP_RAISE_INVALID_OPCODE_RET(); /* no register form */
+
+    /* Doing a partial IEM_MC_CALC_RM_EFF_ADDR by hand here.  It is simplified
+       by (V)SIB being a hard requirement. */
+    if ((bRm & X86_MODRM_RM_MASK) != 4 /*VSIB*/)
+        IEMOP_RAISE_INVALID_OPCODE_RET();
+
+    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
+
+    uint32_t u32Disp = 0; /* Should've been 'offDisp', but python script needs the 'u32' type hint to cope. */
+    if ((bRm & X86_MODRM_MOD_MASK) == (X86_MOD_MEM1 << X86_MODRM_MOD_SHIFT))
+        IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Disp);
+    else if ((bRm & X86_MODRM_MOD_MASK) == (X86_MOD_MEM4 << X86_MODRM_MOD_SHIFT))
+        IEM_OPCODE_GET_NEXT_U32(&u32Disp);
+
+    /* We pack arguments into a single 32-bit value, because passing them individually
+       would greatly exceed the max number of arguments the code generator can handle. */
+    IEMGATHERARGS PackedArgs = {0};
+    PackedArgs.s.iYRegDst       = IEM_GET_MODRM_REG(pVCpu, bRm);
+    PackedArgs.s.iYRegIdc       = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
+    PackedArgs.s.iYRegMsk       = IEM_GET_EFFECTIVE_VVVV(pVCpu);
+    PackedArgs.s.iGRegBase      = (bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB;
+    PackedArgs.s.iEffSeg        = pVCpu->iem.s.iEffSeg;
+    if (   (PackedArgs.s.iGRegBase == X86_GREG_xSP || PackedArgs.s.iGRegBase == X86_GREG_xBP)
+        && !(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK))
+        PackedArgs.s.iEffSeg    = X86_SREG_SS;
+    PackedArgs.s.iScale         = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+    PackedArgs.s.enmEffOpSize   = pVCpu->iem.s.enmEffOpSize;
+    PackedArgs.s.enmEffAddrMode = pVCpu->iem.s.enmEffAddrMode;
+    PackedArgs.s.fVex256        = pVCpu->iem.s.uVexLength;
+    PackedArgs.s.fIdxQword      = fIdxQword;
+    PackedArgs.s.fValQword      = (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) ? 1 : 0;
+
+    uint32_t const u32PackedArgs = PackedArgs.u; /* Workaround: Python gets confused if we directly use 'PackedArgs.u' below. */
+
+    /* Call the C helper: */
+    IEM_MC_BEGIN(IEM_MC_F_NOT_286_OR_OLDER, 0);
+    IEMOP_HLP_DONE_VEX_DECODING_EX(fAvx2);
+    IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+    IEM_MC_PREPARE_AVX_USAGE();
+    IEM_MC_ARG_CONST(uint32_t, u32PackedArgsArg, u32PackedArgs, 0);
+    IEM_MC_ARG_CONST(uint32_t, u32DispArg,       u32Disp,       1);
+    IEM_MC_CALL_CIMPL_2(0, 0, iemCImpl_vpgather_worker_xx, u32PackedArgsArg, u32DispArg);
+    IEM_MC_END();
+}
+
 /** Opcode VEX.66.0F38 0x90 (vex only). */
-FNIEMOP_STUB(iemOp_vpgatherdd_q_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vpgatherdd_q_Vx_Hx_Wx)
+{
+    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VPGATHERDQ, vpgatherdq, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    else
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VPGATHERDD, vpgatherdd, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    return FNIEMOP_CALL_1(iemOpCommonAvx2Gather_Vx_Hx_Wx, 0);
+}
+
 /** Opcode VEX.66.0F38 0x91 (vex only). */
-FNIEMOP_STUB(iemOp_vpgatherqd_q_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vpgatherqd_q_Vx_Hx_Wx)
+{
+    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VPGATHERQQ, vpgatherqq, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    else
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VPGATHERQD, vpgatherqd, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    return FNIEMOP_CALL_1(iemOpCommonAvx2Gather_Vx_Hx_Wx, 1);
+}
+
 /** Opcode VEX.66.0F38 0x92 (vex only). */
-FNIEMOP_STUB(iemOp_vgatherdps_d_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vgatherdps_d_Vx_Hx_Wx)
+{
+    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VGATHERDPD, vgatherdpd, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    else
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VGATHERDPS, vgatherdps, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    return FNIEMOP_CALL_1(iemOpCommonAvx2Gather_Vx_Hx_Wx, 0);
+}
+
 /** Opcode VEX.66.0F38 0x93 (vex only). */
-FNIEMOP_STUB(iemOp_vgatherqps_d_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vgatherqps_d_Vx_Hx_Wx)
+{
+    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VGATHERQPD, vgatherqpd, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    else
+        /**
+         * @opdone
+         */
+        IEMOP_MNEMONIC3(VEX_RMV_MEM, VGATHERQPS, vgatherqps, Vx, MVx, Hx, DISOPTYPE_HARMLESS | DISOPTYPE_X86_AVX, 0); /** @todo? */
+    return FNIEMOP_CALL_1(iemOpCommonAvx2Gather_Vx_Hx_Wx, 1);
+}
+
 /* Opcode VEX.66.0F38 0x94 - invalid. */
 /* Opcode VEX.66.0F38 0x95 - invalid. */
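The decoder above rejects everything but ModRM.rm = 4 because VSIB addressing has no register or SIB-less form; scale, index and base then come out of fixed bit positions of the SIB byte. A standalone sketch of that field extraction (plain C with literal masks standing in for the X86_SIB_* / X86_MODRM_* macros, and without the REX/VEX .X/.B extension bits the real code ORs in):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t const bRm  = 0x04; /* mod=00, reg=000, rm=100: memory form with SIB */
    uint8_t const bSib = 0xd9; /* scale=11 (x8), index=011 (ymm3), base=001 (rCX) */

    if ((bRm & 0x07) != 4)     /* rm != 100b: no SIB byte, invalid for gathers */
        return 1;

    unsigned const iScale    = (bSib >> 6) & 7; /* shift count: 0..3 => x1/x2/x4/x8 */
    unsigned const iYRegIdc  = (bSib >> 3) & 7; /* vector index register (low 3 bits) */
    unsigned const iGRegBase =  bSib       & 7; /* general base register (low 3 bits) */

    printf("scale=x%u index=ymm%u base=r%u\n", 1u << iScale, iYRegIdc, iGRegBase);
    return 0; /* prints: scale=x8 index=ymm3 base=r1 */
}

Note how iScale is kept as a shift count rather than a multiplier, matching the worker's 'offIndex <<= PackedArgs.s.iScale' above.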
trunk/src/VBox/VMM/include/IEMInternal.h
r105616 -> r105664:

 VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU32NoAc(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
+VBOXSTRICTRC iemMemFetchDataU64NoAc(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
 VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
…
 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
+IEM_CIMPL_PROTO_2(iemCImpl_vpgather_worker_xx, uint32_t, u32PackedArgs, uint32_t, u32Disp);

 /** @} */
…
 DECLHIDDEN(struct IEMNATIVEPERCHUNKCTX const *) iemNativeRecompileAttachExecMemChunkCtx(PVMCPU pVCpu, uint32_t idxChunk);

+/** Packed 32-bit argument for iemCImpl_vpgather_worker_xx. */
+typedef union IEMGATHERARGS
+{
+    /** Integer view. */
+    uint32_t u;
+    /** Bitfield view. */
+    struct
+    {
+        uint32_t iYRegDst       : 4; /**<  0 - XMM or YMM register number (destination) */
+        uint32_t iYRegIdc       : 4; /**<  4 - XMM or YMM register number (indices) */
+        uint32_t iYRegMsk       : 4; /**<  8 - XMM or YMM register number (mask) */
+        uint32_t iGRegBase      : 4; /**< 12 - general register number (base ptr) */
+        uint32_t iScale         : 2; /**< 16 - scale factor (1/2/4/8) */
+        uint32_t enmEffOpSize   : 2; /**< 18 - operand size (16/32/64/--) */
+        uint32_t enmEffAddrMode : 2; /**< 20 - addressing mode (16/32/64/--) */
+        uint32_t iEffSeg        : 3; /**< 22 - effective segment (ES/CS/SS/DS/FS/GS) */
+        uint32_t fVex256        : 1; /**< 25 - overall instruction width (128/256 bits) */
+        uint32_t fIdxQword      : 1; /**< 26 - individual index width (4/8 bytes) */
+        uint32_t fValQword      : 1; /**< 27 - individual value width (4/8 bytes) */
+    } s;
+} IEMGATHERARGS;
+AssertCompileSize(IEMGATHERARGS, sizeof(uint32_t));
+
 #endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */
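The union gives the decoder a bitfield view for packing and hands the worker a plain uint32_t for the argument slot. A minimal round-trip sketch under the same layout (hypothetical standalone C11 program with a stand-in type; the real header uses AssertCompileSize where this uses static_assert):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-in for the IEMGATHERARGS union declared above (same field layout). */
typedef union GATHERARGS
{
    uint32_t u;
    struct
    {
        uint32_t iYRegDst       : 4;
        uint32_t iYRegIdc       : 4;
        uint32_t iYRegMsk       : 4;
        uint32_t iGRegBase      : 4;
        uint32_t iScale         : 2;
        uint32_t enmEffOpSize   : 2;
        uint32_t enmEffAddrMode : 2;
        uint32_t iEffSeg        : 3;
        uint32_t fVex256        : 1;
        uint32_t fIdxQword      : 1;
        uint32_t fValQword      : 1;
    } s;
} GATHERARGS;

int main(void)
{
    static_assert(sizeof(GATHERARGS) == sizeof(uint32_t), "must stay one dword");

    GATHERARGS Args = {0};          /* pack on the decoder side ... */
    Args.s.iYRegDst = 7;
    Args.s.iYRegIdc = 3;
    Args.s.iYRegMsk = 1;
    Args.s.fVex256  = 1;
    uint32_t const uPacked = Args.u;

    GATHERARGS const Unpacked = { uPacked }; /* ... unpack in the worker. */
    printf("dst=ymm%u idx=ymm%u msk=ymm%u vex256=%u\n",
           Unpacked.s.iYRegDst, Unpacked.s.iYRegIdc, Unpacked.s.iYRegMsk, Unpacked.s.fVex256);
    return 0;
}

Bitfield ordering is implementation-defined in C, but since the same union packs and unpacks the value on both sides of the call, the round trip is safe regardless.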
trunk/src/VBox/VMM/include/IEMMc.h
r105652 -> r105664:

 /**
  * Defers the rest of the instruction emulation to a C implementation routine
- * and returns, taking two arguments in addition to the standard ones.
+ * and returns, taking five arguments in addition to the standard ones.
  *
  * @param   a_fFlags    IEM_CIMPL_F_XXX.
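The deferral pattern this comment documents is visible in the gather code above: IEM_CIMPL_DEF_2 declares a helper that implicitly receives the standard arguments (pVCpu, cbInstr), and IEM_MC_CALL_CIMPL_2 invokes it with the explicit extras. A toy illustration of that macro pairing (hypothetical MY_* names and a stub vCPU type, not the real IEM macros):

#include <stdint.h>
#include <stdio.h>

typedef struct VCPUSTUB { uint64_t uRip; } VCPUSTUB;

/* Hypothetical mini versions of the DEF/CALL macro pair: the "standard"
   arguments (pVCpu, cbInstr) are threaded in implicitly, extras explicitly. */
#define MY_CIMPL_DEF_2(a_Name, a_T0, a_N0, a_T1, a_N1) \
    int a_Name(VCPUSTUB *pVCpu, uint8_t cbInstr, a_T0 a_N0, a_T1 a_N1)
#define MY_CALL_CIMPL_2(a_pVCpu, a_cbInstr, a_fn, a0, a1) \
    (a_fn)((a_pVCpu), (a_cbInstr), (a0), (a1))

MY_CIMPL_DEF_2(myCImpl_demo, uint32_t, u32PackedArgs, uint32_t, u32Disp)
{
    printf("packed=%#x disp=%#x\n", u32PackedArgs, u32Disp);
    pVCpu->uRip += cbInstr; /* advance RIP past the decoded instruction */
    return 0;
}

int main(void)
{
    VCPUSTUB VCpu = { 0x1000 };
    MY_CALL_CIMPL_2(&VCpu, 5, myCImpl_demo, 0x1234u, 0x40u);
    printf("rip=%#llx\n", (unsigned long long)VCpu.uRip); /* prints rip=0x1005 */
    return 0;
}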