Changeset r102424

- Timestamp: Dec 1, 2023 10:43:39 PM (14 months ago)
- Location: trunk/src/VBox/VMM
- Files: 11 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r102310 → r102424)

Added a rollback counterpart to the commit-and-unmap routine, used when a mapped guest write must be discarded instead of committed:

    /**
     * Rolls back the guest memory (conceptually only) and unmaps it.
     *
     * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
     * @param   pvMem   The mapping.
     * @param   fAccess The kind of access.
     */
    void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
    {
        int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
        AssertReturnVoid(iMemMap >= 0);

        /* Unlock it if necessary. */
        if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
            PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

        /* Free the entry. */
        pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        Assert(pVCpu->iem.s.cActiveMappings != 0);
        pVCpu->iem.s.cActiveMappings--;
    }

Inside the IEM_WITH_SETJMP section, added the out-of-line fallback for the new inlined write-only rollback helper:

    /** Fallback for iemMemRollbackAndUnmapWo. */
    void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
    {
        Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
        iemMemRollbackAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
    }

Added an RTFLOAT80U (tword) instantiation of the read/write template:

    /* See IEMAllMemRWTmplInline.cpp.h */
    #define TMPL_MEM_BY_REF

    #define TMPL_MEM_TYPE       RTFLOAT80U
    #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
    #define TMPL_MEM_FN_SUFF    R80
    #define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
    #define TMPL_MEM_FMT_DESC   "tword"
    #include "IEMAllMemRWTmpl.cpp.h"

Removed the hand-written iemMemFetchDataR80() and iemMemFetchDataR80Jmp() fetch functions ("the lazy approach for now"); the tword fetch path is now generated by the template instantiation above.
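For orientation, the following standalone sketch illustrates the commit-versus-rollback distinction the new function introduces: a write mapping may go through a bounce buffer, and rolling back frees the mapping entry without copying the buffer back. This is a toy model with invented names (ToyCpu, toyMap, ...), not the VirtualBox implementation.

    // Toy model of commit vs. rollback of a bounce-buffered write mapping.
    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    struct ToyMapping { void *pv; uint32_t fAccess; bool fActive; };

    struct ToyCpu
    {
        ToyMapping aMappings[3] = {};
        unsigned   cActive      = 0;
        uint8_t    abBounce[16] = {};   /* pretend bounce buffer  */
        uint8_t    abGuest[16]  = {};   /* pretend guest memory   */
    };

    /* Hand out the bounce buffer and record the mapping. */
    int toyMap(ToyCpu &cpu, void **ppv, uint32_t fAccess)
    {
        for (unsigned i = 0; i < 3; i++)
            if (!cpu.aMappings[i].fActive)
            {
                cpu.aMappings[i] = ToyMapping{ cpu.abBounce, fAccess, true };
                cpu.cActive++;
                *ppv = cpu.abBounce;
                return (int)i;
            }
        return -1;
    }

    /* Commit: copy the bounce buffer to "guest memory", then free the entry. */
    void toyCommitAndUnmap(ToyCpu &cpu, int iMap)
    {
        memcpy(cpu.abGuest, cpu.abBounce, sizeof(cpu.abGuest));
        cpu.aMappings[iMap].fActive = false;
        assert(cpu.cActive != 0);
        cpu.cActive--;
    }

    /* Rollback: discard whatever was written to the buffer, just free the entry. */
    void toyRollbackAndUnmap(ToyCpu &cpu, int iMap)
    {
        cpu.aMappings[iMap].fActive = false;
        assert(cpu.cActive != 0);
        cpu.cActive--;
    }

    int main()
    {
        ToyCpu cpu;
        void *pv = nullptr;
        int iMap = toyMap(cpu, &pv, /*fAccess=*/2);
        memset(pv, 0xff, 16);             /* speculative store */
        toyRollbackAndUnmap(cpu, iMap);   /* pending unmasked exception: drop it */
        printf("guest[0]=%02x active=%u\n", cpu.abGuest[0], cpu.cActive);
        return 0;
    }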
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
(r102397 → r102424)

The FPU store instructions that write through a mapped guest buffer (fst/fstp m32r, fisttp/fist/fistp m32i, and fstp m80r) were restructured to use the new typed write-only mapping and rollback statements. In each block: IEM_MC_BEGIN reserves an extra local where needed; the destination is mapped with IEM_MC_MEM_MAP_R32_WO / IEM_MC_MEM_MAP_I32_WO / IEM_MC_MEM_MAP_R80_WO together with a bUnmapInfo local instead of the generic IEM_MC_MEM_MAP / IEM_MC_MEM_MAP_EX with IEM_ACCESS_DATA_W; the u16Fsw local moves into the branch that needs it; the commit uses IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO respectively IEM_MC_MEM_COMMIT_AND_UNMAP_WO; and a new ELSE branch rolls the mapping back with IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO when FCW.IM is not masked. The rewritten fst m32r block is representative:

    IEMOP_MNEMONIC(fst_m32r, "fst m32r");
    IEM_MC_BEGIN(3, 3, 0, 0);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_PREPARE_FPU_USAGE();

    IEM_MC_LOCAL(uint8_t, bUnmapInfo);
    IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
    IEM_MC_MEM_MAP_R32_WO(pr32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);

    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0) {
        IEM_MC_LOCAL(uint16_t, u16Fsw);
        IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(pr32Dst, bUnmapInfo, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst, pVCpu->iem.s.uFpuOpcode);
    } IEM_MC_ELSE() {
        IEM_MC_IF_FCW_IM() {
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP_WO(pr32Dst, bUnmapInfo);
        } IEM_MC_ELSE() {
            IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(pr32Dst, bUnmapInfo);
        } IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst, pVCpu->iem.s.uFpuOpcode);
    } IEM_MC_ENDIF();

The fstp m32r, fisttp m32i, fist m32i, fistp m32i and fstp m80r bodies follow the same pattern with their respective workers (iemAImpl_fistt_r80_to_i32, iemAImpl_fist_r80_to_i32, iemAImpl_fst_r80_to_r80), the integer variants storing INT32_MIN (integer indefinite) and the m80r variant a negative QNaN on the masked stack-underflow path.
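The control flow above is easier to see without the IEM_MC_* macro layer. The sketch below restates it as plain C++ with invented stand-ins (g_fSt0Empty, commitAndUnmap, ...); it simplifies the real semantics (for instance it ignores the FSW-based suppression of the store) and is only meant to show the map / commit / rollback branching.

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    static bool  g_fSt0Empty   = true;   /* ST(0) empty -> stack underflow path        */
    static bool  g_fFcwImMask  = true;   /* FCW.IM: invalid-operation exception masked */
    static float g_r32GuestMem = 0.0f;   /* the mapped m32 destination                 */

    static void commitAndUnmap(float r32) { g_r32GuestMem = r32; printf("committed %g\n", r32); }
    static void rollbackAndUnmap()        { printf("rolled back, memory untouched\n"); }

    /* Rough shape of the rewritten fst m32r block. */
    void fstM32r(float r80Value /* pretend ST(0) */)
    {
        float r32Dst;                            /* IEM_MC_MEM_MAP_R32_WO                    */
        if (!g_fSt0Empty)
        {
            r32Dst = r80Value;                   /* iemAImpl_fst_r80_to_r32 stand-in         */
            commitAndUnmap(r32Dst);              /* ..._COMMIT_AND_UNMAP_FOR_FPU_STORE_WO    */
        }
        else if (g_fFcwImMask)
        {
            r32Dst = -std::numeric_limits<float>::quiet_NaN();  /* QNaN indefinite           */
            commitAndUnmap(r32Dst);              /* IEM_MC_MEM_COMMIT_AND_UNMAP_WO           */
        }
        else
            rollbackAndUnmap();                  /* IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO (new)   */
    }

    int main()
    {
        fstM32r(1.5f);        /* empty register: QNaN stored because #I is masked */
        g_fSt0Empty = false;
        fstM32r(1.5f);        /* normal store */
        return 0;
    }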
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
(r102409 → r102424)

Registered the new microcode statements in the MC block statement table:

    'IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO':  (McBlock.parseMcGeneric, True, False, ),
    'IEM_MC_MEM_MAP_I32_WO':                         (McBlock.parseMcGeneric, True, False, ),
    'IEM_MC_MEM_MAP_R32_WO':                         (McBlock.parseMcGeneric, True, False, ),
    'IEM_MC_MEM_MAP_R80_WO':                         (McBlock.parseMcGeneric, True, False, ),
    'IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO':              (McBlock.parseMcGeneric, True, False, ),
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
(r100868 → r102424)

The read/write template now supports passing values by reference via TMPL_MEM_BY_REF (for types wider than 64 bits such as RTFLOAT80U):

- The safe fetch fallback gains a by-reference variant that returns the value through an out parameter instead of by value:

    # ifdef TMPL_MEM_BY_REF
    void
    RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    {
    #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
        pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
    #  endif
        TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                        IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
        *pDst = *pSrc;
        iemMemCommitAndUnmapJmp(pVCpu, (void *)pSrc, IEM_ACCESS_DATA_R);
        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
    }
    # else  /* !TMPL_MEM_BY_REF */
    ...existing by-value version unchanged...
    # endif /* !TMPL_MEM_BY_REF */

- The standard store function and the safe store fallback take "TMPL_MEM_TYPE const *pValue" instead of "TMPL_MEM_TYPE uValue" when TMPL_MEM_BY_REF is defined, dereferencing it for the actual store and in the Log6 statements.
- The comment on the standard store function is corrected from "Standard fetch function" to "Standard store function", and the stale "@param u8Value" doxygen tag becomes "@param uValue".
- TMPL_MEM_BY_REF is #undef'd together with the other template parameters at the end of the file.
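As background on the mechanism, these .cpp.h "templates" are included once per type with TMPL_* macros selecting names, types and the by-value/by-reference parameter form. The self-contained mock below shows the same token-pasting idea in miniature; all names in it (MAKE_STORE_BY_VALUE, MyFloat80, ...) are invented for illustration and are not the real template header.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    #define MY_CONCAT(a,b) a##b

    /* "Instantiate" a store helper for a by-value type. */
    #define MAKE_STORE_BY_VALUE(Suffix, Type) \
        void MY_CONCAT(storeData,Suffix)(uint8_t *pbDst, Type uValue) \
        { memcpy(pbDst, &uValue, sizeof(uValue)); }

    /* "Instantiate" a store helper for a by-reference type (wider than 64 bits). */
    #define MAKE_STORE_BY_REF(Suffix, Type) \
        void MY_CONCAT(storeData,Suffix)(uint8_t *pbDst, Type const *pValue) \
        { memcpy(pbDst, pValue, sizeof(*pValue)); }

    struct MyFloat80 { uint8_t ab[10]; };

    MAKE_STORE_BY_VALUE(U64, uint64_t)   /* like TMPL_MEM_TYPE uint64_t               */
    MAKE_STORE_BY_REF(R80, MyFloat80)    /* like TMPL_MEM_TYPE RTFLOAT80U + BY_REF    */

    int main()
    {
        uint8_t   abMem[10] = {};
        MyFloat80 v = {{1,2,3,4,5,6,7,8,9,10}};
        storeDataU64(abMem, 0x1122334455667788ULL);  /* by value                      */
        storeDataR80(abMem, &v);                     /* by reference                  */
        printf("abMem[9]=%u\n", abMem[9]);
        return 0;
    }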
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
(r102367 → r102424)

The inline read/write template gets two new internal helpers and by-reference support:

- A TMPL_MEM_ALIGN_CHECK(a_GCPtr) macro replaces the open-coded alignment/page checks throughout the file (fetch, store, map, push and pop paths). It also handles types whose alignment requirement is smaller than their size minus one, which previously hit an #error:

    /** Helper for checking if @a a_GCPtr is acceptably aligned and fully within
     *  the page for a TMPL_MEM_TYPE. */
    #if TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
    # define TMPL_MEM_ALIGN_CHECK(a_GCPtr) (   (   !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) \
                                                && ((a_GCPtr) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)) \
                                            || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
    #else
    # define TMPL_MEM_ALIGN_CHECK(a_GCPtr) (   !((a_GCPtr) & TMPL_MEM_TYPE_ALIGN) /* If aligned, it will be within the page. */ \
                                            || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, (a_GCPtr), TMPL_MEM_TYPE))
    #endif

- TMPL_MEM_BY_REF is defined automatically for types wider than 64 bits:

    /**
     * Values have to be passed by reference if larger than uint64_t.
     *
     * This is a restriction of the Visual C++ AMD64 calling convention; the gcc
     * AMD64 and ARM64 ABIs can easily pass and return 128-bit values via
     * registers.  For larger values like RTUINT256U, Visual C++ AMD64 and ARM64
     * pass them by hidden reference, whereas the gcc AMD64 ABI will use the stack.
     *
     * So, to avoid passing anything on the stack, we just explicitly pass values
     * by reference (pointer) if they are larger than uint64_t.  This ASSUMES a
     * 64-bit host.
     */
    #if TMPL_MEM_TYPE_SIZE > 8
    # define TMPL_MEM_BY_REF
    #else
    # undef TMPL_MEM_BY_REF
    #endif

- When TMPL_MEM_BY_REF is defined, the inlined segmented and flat fetch functions return the value through a TMPL_MEM_TYPE *pValue out parameter, and the inlined store functions take a TMPL_MEM_TYPE const *pValue, both on the TLB fast path and when deferring to the SafeJmp fallbacks; the log statements dump the value with "%.<TMPL_MEM_TYPE_SIZE>Rhxs" formatting.
- The stack push/pop section now rejects unsupported instantiations up front: #error "Stack not supported for this type size / alignment size - please #undef TMPL_MEM_WITH_STACK" for sizes above 8 bytes or alignment smaller than size minus one.
- A few stale AssertCompile(X86_CR0_AM == X86_EFL_AC) lines were dropped, "Store the dword" comments corrected to "Store the value", several GCPtrEff locals made const, and TMPL_MEM_ALIGN_CHECK / TMPL_MEM_BY_REF are #undef'd with the other template parameters at the end.
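The ABI point made in the comment above can be illustrated with a standalone sketch: a 10-byte value does not fit a single integer register, so the by-value form ends up on the stack or behind a hidden reference depending on the compiler, while the by-reference form is always just one pointer argument. The names below (Float80, storeByRef, ...) are illustrative only.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    #pragma pack(push, 1)
    struct Float80 { uint64_t mantissa; uint16_t signExponent; };   /* 10 bytes */
    #pragma pack(pop)
    static_assert(sizeof(Float80) == 10, "expected a 10-byte extended-precision layout");

    /* By value: the ABI has to find a home for all 10 bytes somewhere. */
    void storeByValue(uint8_t *pbDst, Float80 value)
    {
        memcpy(pbDst, &value, sizeof(value));
    }

    /* By reference: a single pointer argument, always a register. */
    void storeByRef(uint8_t *pbDst, const Float80 *pValue)
    {
        memcpy(pbDst, pValue, sizeof(*pValue));
    }

    int main()
    {
        uint8_t abMem[10];
        Float80 v{0x8000000000000000ull, 0x3fff};   /* 1.0 in extended precision */
        storeByRef(abMem, &v);
        storeByValue(abMem, v);
        printf("sizeof(Float80)=%zu first byte=%02x\n", sizeof(v), abMem[0]);
        return 0;
    }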
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
(r102409 → r102424)

Added the flat variants of the new write-only map statements to the statement table:

    'IEM_MC_MEM_FLAT_MAP_I32_WO':  (None, True, False, ),
    'IEM_MC_MEM_FLAT_MAP_R32_WO':  (None, True, False, ),
    'IEM_MC_MEM_FLAT_MAP_R80_WO':  (None, True, False, ),
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
(r102394 → r102424)

Two changes to the threaded-function generator:

- The segmented-to-flat statement translation table gains entries for the new write-only map statements:

    'IEM_MC_MEM_MAP_I32_WO':  ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
    'IEM_MC_MEM_MAP_R32_WO':  ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
    'IEM_MC_MEM_MAP_R80_WO':  ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),

- analyzeFindVariablesAndCallArgs no longer rejects variables declared inside conditional branches (the rewritten FPU store blocks now declare locals inside IEM_MC_IF / IEM_MC_ELSE); the check is commented out:

    #if len(self.dVariables) != cBefore:
    #    raise Exception('Variables/arguments defined in conditional branches!');
trunk/src/VBox/VMM/include/IEMInline.h
(r101387 → r102424)

- Added the inlined write-only rollback helper, which returns immediately when bMapInfo is zero on ring-3 data-TLB builds and otherwise defers to the new safe fallback:

    DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
    {
    # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
        if (RT_LIKELY(bMapInfo == 0))
            return;
    # endif
        iemMemRollbackAndUnmapWoSafe(pVCpu, pvMem, bMapInfo);
    }

- Added an RTFLOAT80U instantiation of the inline read/write template, after undefining TMPL_MEM_NO_STORE and TMPL_MEM_NO_MAPPING left over from the preceding instantiation:

    #undef TMPL_MEM_NO_STORE
    #undef TMPL_MEM_NO_MAPPING

    #define TMPL_MEM_TYPE       RTFLOAT80U
    #define TMPL_MEM_TYPE_ALIGN 7
    #define TMPL_MEM_TYPE_SIZE  10
    #define TMPL_MEM_FN_SUFF    R80
    #define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
    #define TMPL_MEM_FMT_DESC   "tword"
    #include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
trunk/src/VBox/VMM/include/IEMInternal.h
(r102394 → r102424)

New and adjusted prototypes:

    void            iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;

    void            iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PRTFLOAT80U pr80Dst) IEM_NOEXCEPT_MAY_LONGJMP;

    void            iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;

    PRTFLOAT80U     iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    PRTFLOAT80U     iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    PCRTFLOAT80U    iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

    void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT;

The old hand-written iemMemFetchDataR80Jmp(pVCpu, pr80Dst, iSegReg, GCPtrMem) declaration after the #endif was dropped; the replacement above sits inside the guarded block with the destination pointer as the last parameter.
trunk/src/VBox/VMM/include/IEMMc.h
(r102349 → r102424)

- IEM_MC_FETCH_MEM_FLAT_R80 now calls the template-generated flat fetcher, iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem)), instead of passing UINT8_MAX to the segmented one.
- New typed aliases for write-only 32-bit mappings, each with setjmp and non-setjmp variants: IEM_MC_MEM_MAP_I32_WO / IEM_MC_MEM_FLAT_MAP_I32_WO (int32_t) and IEM_MC_MEM_MAP_R32_WO / IEM_MC_MEM_FLAT_MAP_R32_WO (RTFLOAT32U), defined on top of the existing U32 write-only map statements (casting the pointer returned by iemMemMapDataU32WoJmp / iemMemFlatMapDataU32WoJmp in the setjmp case).
- New IEM_MC_MEM_MAP_R80_WO and IEM_MC_MEM_FLAT_MAP_R80_WO statements for mapping an 80-bit float destination write-only; the non-setjmp variants call iemMemMap() with sizeof(RTFLOAT80U), IEM_ACCESS_DATA_W and an alignment of sizeof(uint64_t) - 1 and set a_bUnmapInfo to 1 | (IEM_ACCESS_TYPE_WRITE << 4), while the setjmp variants call iemMemMapDataR80WoJmp / iemMemFlatMapDataR80WoJmp.
- The old IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE is marked @deprecated and a bUnmapInfo-based replacement is added. It commits the store unless the FPU status word indicates a pending unmasked exception that should suppress it ("The current understanding is that #O, #U, #IA and #IS will prevent a store, while #P will not."):

    #ifndef IEM_WITH_SETJMP
    # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
            RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
            if (   !(a_u16FSW & X86_FSW_ES) \
                || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
                     & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
                IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \
            else \
                iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
        } while (0)
    #else
    # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
            if (   !(a_u16FSW & X86_FSW_ES) \
                || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
                     & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
                iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), a_bMapInfo); \
            else \
                iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo); \
        } while (0)
    #endif

- New IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO statement that rolls back (conceptually only, assuming nothing was written) and unmaps the guest memory, mapping onto iemMemRollbackAndUnmap() respectively iemMemRollbackAndUnmapWo().
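The commit condition in the new macro can be restated on its own. The sketch below is a minimal, self-contained rendering of that check with hand-written x87 bit constants (the FSW exception bits and the FCW mask bits share the same low-bit layout); shouldCommitFpuStore and the constants are illustrative assumptions, not the VirtualBox definitions.

    #include <cstdint>
    #include <cstdio>

    constexpr uint16_t FSW_IE = 0x0001;   /* invalid operation   */
    constexpr uint16_t FSW_OE = 0x0008;   /* overflow            */
    constexpr uint16_t FSW_UE = 0x0010;   /* underflow           */
    constexpr uint16_t FSW_ES = 0x0080;   /* exception summary   */
    constexpr uint16_t FCW_MASK_ALL = 0x003f;   /* IM..PM mask bits */

    /* Keep the store unless FSW.ES is set and one of #U, #O, #I is unmasked in FCW. */
    bool shouldCommitFpuStore(uint16_t fsw, uint16_t fcw)
    {
        if (!(fsw & FSW_ES))
            return true;                                    /* no pending exception: store       */
        uint16_t fUnmasked = (fsw & (FSW_UE | FSW_OE | FSW_IE)) & ~(fcw & FCW_MASK_ALL);
        return fUnmasked == 0;                              /* only masked ones (e.g. #P): store */
    }

    int main()
    {
        printf("%d\n", shouldCommitFpuStore(FSW_ES | FSW_UE, 0x037f));  /* #U masked   -> 1 */
        printf("%d\n", shouldCommitFpuStore(FSW_ES | FSW_UE, 0x036f));  /* #U unmasked -> 0 */
        return 0;
    }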
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
(r102397 → r102424)

Added syntax-checker stubs for the new statements: IEM_MC_MEM_MAP_I32_WO (expects an int32_t * variable), IEM_MC_MEM_MAP_R32_WO (RTFLOAT32U *), IEM_MC_MEM_MAP_R80_WO (RTFLOAT80U *), IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO and IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO, each checking the variable declarations, the uint8_t unmap-info local, the guest pointer and the segment index in the same way as the existing U32/U64 map stubs.