Changeset 66909 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp: May 16, 2017 1:29:44 PM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited
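This changeset adds 256-bit guest-memory store helpers (iemMemStoreDataU256, iemMemStoreDataU256Jmp and their AVX-aligned variants) together with the IEM_MC_FETCH_YREG_* and IEM_MC_STORE_MEM_U256* microcode macros in IEMAll.cpp, registers the plain 'Md' operand form in IEMAllInstructionsPython.py, and replaces the iemOp_vmovups_Wps_Vps stub in IEMAllInstructionsVexMap1.cpp.h with a working AVX implementation.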
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r66906 → r66909)

…
 
 /**
+ * Stores a data qqword.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   pu256Value  Pointer to the value to store.
+ */
+IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
+{
+    /* The lazy approach for now... */
+    PRTUINT256U pu256Dst;
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    if (rc == VINF_SUCCESS)
+    {
+        pu256Dst->au64[0] = pu256Value->au64[0];
+        pu256Dst->au64[1] = pu256Value->au64[1];
+        pu256Dst->au64[2] = pu256Value->au64[2];
+        pu256Dst->au64[3] = pu256Value->au64[3];
+        rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
+    }
+    return rc;
+}
+
+
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data qqword, longjmp on error.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   pu256Value  Pointer to the value to store.
+ */
+IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
+{
+    /* The lazy approach for now... */
+    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    pu256Dst->au64[0] = pu256Value->au64[0];
+    pu256Dst->au64[1] = pu256Value->au64[1];
+    pu256Dst->au64[2] = pu256Value->au64[2];
+    pu256Dst->au64[3] = pu256Value->au64[3];
+    iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
+}
+#endif
+
+
+/**
+ * Stores a data qqword, AVX aligned.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   pu256Value  Pointer to the value to store.
+ */
+IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
+{
+    /* The lazy approach for now... */
+    if (GCPtrMem & 31)
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+
+    PRTUINT256U pu256Dst;
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    if (rc == VINF_SUCCESS)
+    {
+        pu256Dst->au64[0] = pu256Value->au64[0];
+        pu256Dst->au64[1] = pu256Value->au64[1];
+        pu256Dst->au64[2] = pu256Value->au64[2];
+        pu256Dst->au64[3] = pu256Value->au64[3];
+        rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
+    }
+    return rc;
+}
+
+
+#ifdef IEM_WITH_SETJMP
+/**
+ * Stores a data qqword, AVX aligned, longjmp on error.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   pu256Value  Pointer to the value to store.
+ */
+DECL_NO_INLINE(IEM_STATIC, void)
+iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
+{
+    /* The lazy approach for now... */
+    if ((GCPtrMem & 31) == 0)
+    {
+        PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+        pu256Dst->au64[0] = pu256Value->au64[0];
+        pu256Dst->au64[1] = pu256Value->au64[1];
+        pu256Dst->au64[2] = pu256Value->au64[2];
+        pu256Dst->au64[3] = pu256Value->au64[3];
+        iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
+        return;
+    }
+
+    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
+    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+}
+#endif
+
+
+/**
  * Stores a descriptor register (sgdt, sidt).
  *
…
     } while (0)
 
+#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
+    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+         (a_u64Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
+    } while (0)
+#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
+    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+         (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
+         (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
+    } while (0)
+#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
+    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+         uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
+         (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
+         (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
+         (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
+         (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
+    } while (0)
+
 #define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
 #define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
…
         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
     } while (0)
+
 #define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
     do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
…
         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
     } while (0)
+
 #define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
     do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
…
 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
         iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
+#endif
+
+#ifndef IEM_WITH_SETJMP
+# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
+# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
+#else
+# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
+    iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
+# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
+    iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
 #endif
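The new helpers follow IEM's usual lazy store pattern: map the destination through iemMemMap (or the longjmp variant), copy the 256-bit value as four 64-bit lanes, then commit and unmap; the AVX-aligned variants first raise #GP(0) for any address that is not 32-byte aligned. Below is a minimal standalone sketch of that shape, with invented mock names (MOCKUINT256U, mockMemMap, ...) rather than the real IEM API:

/* Standalone sketch (not VirtualBox code) of the map -> copy -> commit
 * shape used by the new iemMemStoreDataU256* helpers. */
#include <stdint.h>
#include <stdio.h>

typedef union MOCKUINT256U
{
    uint64_t au64[4];
    uint8_t  au8[32];
} MOCKUINT256U;

static _Alignas(32) uint8_t g_abGuestMem[64];   /* pretend guest memory */

/* Stand-in for iemMemMap(); the real function also applies segmentation
 * and paging checks and reports failures as strict status codes. */
static int mockMemMap(void **ppvMem, size_t cbMem, uint32_t GCPtrMem)
{
    if (GCPtrMem + cbMem > sizeof(g_abGuestMem))
        return -1;                              /* failure, VERR-style */
    *ppvMem = &g_abGuestMem[GCPtrMem];
    return 0;                                   /* VINF_SUCCESS-style */
}

/* Same shape as iemMemStoreDataU256: map, copy four 64-bit lanes, commit. */
static int mockStoreDataU256(uint32_t GCPtrMem, const MOCKUINT256U *puValue)
{
    MOCKUINT256U *puDst;
    int rc = mockMemMap((void **)&puDst, sizeof(*puDst), GCPtrMem);
    if (rc == 0)
    {
        puDst->au64[0] = puValue->au64[0];
        puDst->au64[1] = puValue->au64[1];
        puDst->au64[2] = puValue->au64[2];
        puDst->au64[3] = puValue->au64[3];
        /* the real code commits and unmaps the mapping here */
    }
    return rc;
}

/* The AVX-aligned variant rejects misaligned addresses up front, mirroring
 * the (GCPtrMem & 31) check that raises #GP(0) in the changeset. */
static int mockStoreDataU256AlignedAvx(uint32_t GCPtrMem, const MOCKUINT256U *puValue)
{
    if (GCPtrMem & 31)
        return -2;      /* stands in for iemRaiseGeneralProtectionFault0() */
    return mockStoreDataU256(GCPtrMem, puValue);
}

int main(void)
{
    MOCKUINT256U uVal = { .au64 = { 1, 2, 3, 4 } };
    printf("unaligned store at 8:  rc=%d\n", mockStoreDataU256(8, &uVal));
    printf("aligned store at 32:   rc=%d\n", mockStoreDataU256AlignedAvx(32, &uVal));
    printf("misaligned store at 8: rc=%d\n", mockStoreDataU256AlignedAvx(8, &uVal));
    return 0;
}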
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
(r66906 → r66909)

     'Ma':       ( 'IDX_UseModRM', 'rm', '%Ma', 'Ma', ), ##< Only used by BOUND.
     'Mb_RO':    ( 'IDX_UseModRM', 'rm', '%Mb', 'Mb', ),
+    'Md':       ( 'IDX_UseModRM', 'rm', '%Md', 'Md', ),
     'Md_RO':    ( 'IDX_UseModRM', 'rm', '%Md', 'Md', ),
     'Md_WO':    ( 'IDX_UseModRM', 'rm', '%Md', 'Md', ),
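The new 'Md' row registers a plain 32-bit memory-only operand form alongside the existing read-only ('Md_RO') and write-only ('Md_WO') variants; it is what the retyped vmovss decoding below (second operand changed from Wss to Md) refers to.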
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap1.cpp.h
(r66906 → r66909)

…
  * @optest      op1=0 op2=0 op3=-22 -> op1=0xffffffea
  * @optest      op1=3 op2=-1 op3=0x77 -> op1=-4294967177
- * @oponly
  */
 IEMOP_MNEMONIC3(VEX_RVM, VMOVSS, vmovss, Vss_WO, HdqCss, Uss, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
…
  * @optest      op1=1 op2=2 -> op1=2
  * @optest      op1=0 op2=-22 -> op1=-22
- * @oponly
  */
-    IEMOP_MNEMONIC2(VEX_XM, VMOVSS, vmovss, VssZx_WO, Wss, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
+    IEMOP_MNEMONIC2(VEX_XM, VMOVSS, vmovss, VssZx_WO, Md, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
     IEM_MC_BEGIN(0, 2);
     IEM_MC_LOCAL(uint32_t, uSrc);
…
  * @optest      op1=3 op2=-1 op3=0x77 ->
  *              op1=0xffffffffffffffff0000000000000077
- * @oponly
  */
 IEMOP_MNEMONIC3(VEX_RVM, VMOVSD, vmovsd, Vsd_WO, HdqCsd, Usd, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
…
  * @optest      op1=1 op2=2 -> op1=2
  * @optest      op1=0 op2=-22 -> op1=-22
- * @oponly
  */
 IEMOP_MNEMONIC2(VEX_XM, VMOVSD, vmovsd, VsdZx_WO, Mq, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
…
 }
 
+
 /**
  * @opcode      0x11
  * @oppfx       none
- * @opcpuid     sse
- * @opgroup     og_sse_simdfp_datamove
+ * @opcpuid     avx
+ * @opgroup     og_avx_simdfp_datamove
  * @opxcpttype  4UA
  * @optest      op1=1 op2=2 -> op1=2
- * @optest      op1=0 op2=-42 -> op1=-42
+ * @optest      op1=0 op2=-22 -> op1=-22
+ * @oponly
  */
-FNIEMOP_STUB(iemOp_vmovups_Wps_Vps);
-//FNIEMOP_DEF(iemOp_vmovups_Wps_Vps)
-//{
-//    IEMOP_MNEMONIC2(MR, VMOVUPS, vmovups, Wps, Vps, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
-//    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-//    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-//    {
-//        /*
-//         * Register, register.
-//         */
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_BEGIN(0, 0);
-//        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
-//        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
-//        IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,
-//                              ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    else
-//    {
-//        /*
-//         * Memory, register.
-//         */
-//        IEM_MC_BEGIN(0, 2);
-//        IEM_MC_LOCAL(RTUINT128U, uSrc); /** @todo optimize this one day... */
-//        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
-//
-//        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
-//        IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
-//
-//        IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_STORE_MEM_U128(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    return VINF_SUCCESS;
-//}
+FNIEMOP_DEF(iemOp_vmovups_Wps_Vps)
+{
+    IEMOP_MNEMONIC2(VEX_MR, VMOVUPS, vmovups, Wps_WO, Vps, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
+    Assert(pVCpu->iem.s.uVexLength <= 1);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+        IEM_MC_BEGIN(0, 0);
+        IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+        IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE();
+        if (pVCpu->iem.s.uVexLength == 0)
+            IEM_MC_COPY_YREG_U128_ZX_VLMAX((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,
+                                           ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        else
+            IEM_MC_COPY_YREG_U256_ZX_VLMAX((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,
+                                           ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else if (pVCpu->iem.s.uVexLength == 0)
+    {
+        /*
+         * 128-bit: Memory, register.
+         */
+        IEM_MC_BEGIN(0, 2);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_LOCAL(RTGCPTR,    GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+        IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+        IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ();
+
+        IEM_MC_FETCH_YREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_STORE_MEM_U128(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * 256-bit: Memory, register.
+         */
+        IEM_MC_BEGIN(0, 2);
+        IEM_MC_LOCAL(RTUINT256U, uSrc);
+        IEM_MC_LOCAL(RTGCPTR,    GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+        IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+        IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ();
+
+        IEM_MC_FETCH_YREG_U256(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_STORE_MEM_U256(pVCpu->iem.s.iEffSeg, GCPtrEffSrc, uSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
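The rewritten iemOp_vmovups_Wps_Vps dispatches on two decode facts: ModRM.mod == 3 selects the register-to-register copy (zero-extended up to VLMAX), anything else the memory store, and VEX.L picks the 128- or 256-bit width. A minimal standalone sketch of that dispatch, using simplified ModRM masks rather than the IEM decoder:

#include <stdint.h>
#include <stdio.h>

/* Simplified ModRM field helpers; the real code uses the X86_MODRM_* masks. */
#define MODRM_MOD(bRm)  (((bRm) >> 6) & 3)
#define MODRM_REG(bRm)  (((bRm) >> 3) & 7)
#define MODRM_RM(bRm)   ((bRm) & 7)

/* Mirrors the branch structure of the new vmovups store-form body:
 * mod == 3 -> register copy with VLMAX zero-extension,
 * otherwise -> memory store of 16 (VEX.L=0) or 32 (VEX.L=1) bytes. */
static void decodeVmovupsStore(uint8_t bRm, unsigned fVexL)
{
    if (MODRM_MOD(bRm) == 3)
        printf("reg form: copy %cmm%u -> %cmm%u, zeroing bits above VLMAX\n",
               fVexL ? 'y' : 'x', MODRM_REG(bRm),
               fVexL ? 'y' : 'x', MODRM_RM(bRm));
    else
        printf("mem form: store %u bytes from %cmm%u at the effective address\n",
               fVexL ? 32u : 16u, fVexL ? 'y' : 'x', MODRM_REG(bRm));
}

int main(void)
{
    decodeVmovupsStore(0xC1, 0);    /* mod=3: vmovups xmm1, xmm0 */
    decodeVmovupsStore(0x01, 1);    /* mod=0: vmovups [rcx], ymm0 */
    return 0;
}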