Changeset 65508 in vbox for trunk/src/VBox/VMM

- Timestamp: Jan 29, 2017, 5:33:21 PM (8 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited

Legend: unmodified / added / removed
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(diff r65464 → r65508, around lines 10476–10484 — adds a new MC macro that raises #GP(0) on a misaligned effective address)

```diff
     return iemRaiseGeneralProtectionFault0(pVCpu); \
 } while (0)
+#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
+    do { \
+        if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
+        else return iemRaiseGeneralProtectionFault0(pVCpu); \
+    } while (0)
```
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(diff r65506 → r65508, around lines 6406–6431 — the rendezvous fallback now performs the duplicated tail code itself instead of dropping `cbInstr`)

```diff
     Args.pu128RbxRcx = pu128RbxRcx;
     Args.pEFlags = pEFlags;
+# ifdef VBOX_STRICT
+    Args.cCalls = 0;
+# endif
     VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
                                                iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
     Assert(Args.cCalls == 1);
-    RT_NOREF(cbInstr);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        /* Duplicated tail code. */
+        rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            PCPUMCTX pCtx = pVCpu->iem.s.CTX_SUFF(pCtx);
+            pCtx->eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
+            if (!(*pEFlags & X86_EFL_ZF))
+            {
+                pCtx->rax = pu128RaxRdx->s.Lo;
+                pCtx->rdx = pu128RaxRdx->s.Hi;
+            }
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        }
+    }
     return rcStrict;
 #else
```
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
(diff r65506 → r65508, two hunks — first adds the 16-byte alignment check for CMPXCHG16B; second replaces the SSE-based fallback note/hack and braces the rendezvous call)

```diff
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
     IEMOP_HLP_DONE_DECODING();
+    IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16);
     IEM_MC_MEM_MAP(pu128MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
```

```diff
 # endif
     {
-        /* Note! The fallback for 32-bit systems and systems without CX16 is to use
-           SSE instructions for 16-byte loads and stores.  Since these aren't
-           atomic and there are cycles between the loading and storing, this
-           only works correctly in UNI CPU guests.  If guest SMP is active
-           we have no choice but to use a rendezvous callback here.  Sigh. */
-        IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); /* HACK ALERT! */
-
+        /* Note! The fallback for 32-bit systems and systems without CX16 is multiple
+           accesses and not all all atomic, which works fine on in UNI CPU guest
+           configuration (ignoring DMA).  If guest SMP is active we have no choice
+           but to use a rendezvous callback here.  Sigh. */
         if (pVCpu->CTX_SUFF(pVM)->cCpus == 1)
             IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
         else
+        {
             IEM_MC_CALL_CIMPL_4(iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
+            /* Does not get here, tail code is duplicated in iemCImpl_cmpxchg16b_fallback_rendezvous. */
+        }
     }
```
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
(diff r65194 → r65508, around lines 335–340 — adds a compile-time stub for the new MC macro to the MC checker testcase)

```diff
 #define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() do {} while (0)
 #define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() do {} while (0)
+#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
+    do { AssertCompile(RT_IS_POWER_OF_TWO(a_cbAlign)); CHK_TYPE(RTGCPTR, a_EffAddr); } while (0)

 #define IEM_MC_LOCAL(a_Type, a_Name) \
```
Note: See TracChangeset for help on using the changeset viewer.