Changeset 105356 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jul 16, 2024 1:00:43 PM
- svn:sync-xref-src-repo-rev: 164009
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
Legend:
- Unmodified (space prefix)
- Added (+ prefix)
- Removed (- prefix)
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp (r105148 → r105356)

  /**
+  * Implements 'VLDMXCSR'.
+  *
+  * @param   iEffSeg     The effective segment register for @a GCPtrEff.
+  * @param   GCPtrEff    The address of the image.
+  */
+ IEM_CIMPL_DEF_2(iemCImpl_vldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
+ {
+     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
+
+     /*
+      * Raise exceptions.
+      */
+     if (   (  !IEM_IS_GUEST_CPU_AMD(pVCpu)
+             ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
+             : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM))
+         && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
+     {
+         if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
+         {
+             /*
+              * Do the job.
+              */
+             uint32_t     fNewMxCsr;
+             VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
+             if (rcStrict == VINF_SUCCESS)
+             {
+                 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
+                 if (!(fNewMxCsr & ~fMxCsrMask))
+                 {
+                     pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
+                     return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+                 }
+                 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
+                      fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
+                 return iemRaiseGeneralProtectionFault0(pVCpu);
+             }
+             return rcStrict;
+         }
+         return iemRaiseDeviceNotAvailable(pVCpu);
+     }
+     return iemRaiseUndefinedOpcode(pVCpu);
+ }
+
+
+ /**
   * Common routine for fnstenv and fnsave.
   *
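For readers outside the VBox tree, the heart of the new iemCImpl_vldmxcsr is the reserved-bit check: the 32-bit value fetched from memory must not set any bit outside the CPU's MXCSR_MASK, or the instruction raises #GP(0). The standalone C sketch below mirrors the exception ordering in the diff above (#UD, then #NM, then #GP). It is illustrative only: the vldmxcsrCheck function and the result enum are hypothetical stand-ins for the iemRaiseXxx() paths, the hard-coded bit positions are the architectural CR0/CR4/XCR0 ones, and 0xFFBF is assumed as a baseline MXCSR_MASK (CPUs supporting DAZ report 0xFFFF).

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the guest-exception outcomes; the real
       code returns VBOXSTRICTRC values via iemRaiseXxx(). */
    typedef enum { VLDMXCSR_OK, VLDMXCSR_UD, VLDMXCSR_NM, VLDMXCSR_GP } VLDMXCSRRESULT;

    /* Sketch of the checks above: #UD unless CR4.OSXSAVE is set and the
       SSE+YMM XCR0 bits (or, on AMD, a clear CR0.EM) permit AVX, then
       #NM if CR0.TS is set, then #GP(0) if reserved MXCSR bits are set. */
    VLDMXCSRRESULT vldmxcsrCheck(int fAmd, uint64_t xcr0, uint64_t cr0,
                                 uint64_t cr4, uint32_t fNewMxCsr, uint32_t fMxCsrMask)
    {
        const uint64_t XCR0_SSE_YMM = (1 << 1) | (1 << 2); /* XSAVE_C_SSE | XSAVE_C_YMM */
        int fAvxUsable = !fAmd ? (xcr0 & XCR0_SSE_YMM) == XCR0_SSE_YMM
                               : !(cr0 & (1 << 2) /* CR0.EM */);
        if (!fAvxUsable || !(cr4 & (1 << 18) /* CR4.OSXSAVE */))
            return VLDMXCSR_UD;
        if (cr0 & (1 << 3) /* CR0.TS */)
            return VLDMXCSR_NM;
        if (fNewMxCsr & ~fMxCsrMask)                       /* reserved bits set? */
            return VLDMXCSR_GP;
        return VLDMXCSR_OK;                                /* MXCSR <- fNewMxCsr */
    }

    int main(void)
    {
        /* 0xFFBF is the baseline MXCSR_MASK; CPUs with DAZ report 0xFFFF. */
        printf("%d\n", vldmxcsrCheck(0, 0x7, 0x80000031, 0x40600, 0x1F80,  0xFFBF)); /* OK */
        printf("%d\n", vldmxcsrCheck(0, 0x7, 0x80000031, 0x40600, 0x10000, 0xFFBF)); /* GP */
        return 0;
    }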
trunk/src/VBox/VMM/VMMAll/IEMAllInstVexMap1.cpp.h (r105355 → r105356)

   * @ optest    op1=0x2083 cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x6
   */
- FNIEMOP_STUB_1(iemOp_VGrp15_vldmxcsr, uint8_t, bRm);
- //FNIEMOP_DEF_1(iemOp_VGrp15_vldmxcsr, uint8_t, bRm)
- //{
- //    IEMOP_MNEMONIC1(M_MEM, VLDMXCSR, vldmxcsr, MdRO, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES);
- //    IEM_MC_BEGIN(IEM_MC_F_NOT_286_OR_OLDER, 0);
- //    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
- //    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
- //    IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV_EX(fAvx);
- //    IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
- //    IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
- //    IEM_MC_CALL_CIMPL_2(iemCImpl_ldmxcsr, iEffSeg, GCPtrEff);
- //    IEM_MC_END();
- //    return VINF_SUCCESS;
- //}
+ FNIEMOP_DEF_1(iemOp_VGrp15_vldmxcsr, uint8_t, bRm)
+ {
+     IEMOP_MNEMONIC1(M_MEM, VLDMXCSR, vldmxcsr, Md_RO, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_VEX_L_ZERO);
+     IEM_MC_BEGIN(IEM_MC_F_NOT_286_OR_OLDER, 0);
+     IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
+     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+     IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV_EX(fAvx);
+     IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
+     IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
+     IEM_MC_CALL_CIMPL_2(IEM_CIMPL_F_FPU, RT_BIT_64(kIemNativeGstReg_MxCsr), iemCImpl_vldmxcsr, iEffSeg, GCPtrEff);
+     IEM_MC_END();
+     return VINF_SUCCESS;
+ }
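As a usage note: with the decoder stub replaced, guest code compiled with AVX enabled can exercise this path through the ordinary MXCSR intrinsics. The snippet below is a hedged example, not part of the changeset; whether the compiler actually emits the VEX-encoded vldmxcsr/vstmxcsr (rather than the legacy ldmxcsr/stmxcsr) depends on building with AVX codegen, e.g. GCC/Clang -mavx.

    #include <immintrin.h>
    #include <stdio.h>

    /* Round-trips MXCSR through memory; built with -mavx this uses the
       vstmxcsr/vldmxcsr encodings the changeset implements in IEM. */
    int main(void)
    {
        unsigned int mxcsr = _mm_getcsr();   /* vstmxcsr [mem] */
        printf("MXCSR = %#x\n", mxcsr);
        _mm_setcsr(mxcsr | 0x8040);          /* vldmxcsr [mem]: set FTZ (bit 15) + DAZ (bit 6) */
        printf("MXCSR = %#x\n", _mm_getcsr());
        return 0;
    }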