Changeset 95410 in vbox
- Timestamp: Jun 28, 2022 6:33:26 PM
- svn:sync-xref-src-repo-rev: 152012
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r95307 r95410 1896 1896 uint16_t *pu16Frame; 1897 1897 uint64_t uNewRsp; 1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp); 1899 1899 if (rcStrict != VINF_SUCCESS) 1900 1900 return rcStrict; … … 2209 2209 * not perform correct translation if this happens. See Intel spec. 7.2.1 2210 2210 * "Task-State Segment". */ 2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW );2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0); 2212 2212 if (rcStrict != VINF_SUCCESS) 2213 2213 { … … 2226 2226 PX86DESC pDescCurTSS; 2227 2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX, 2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW );2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0); 2229 2229 if (rcStrict != VINF_SUCCESS) 2230 2230 { … … 2274 2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip); 2275 2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64); 2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW );2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0); 2277 2277 if (rcStrict != VINF_SUCCESS) 2278 2278 { … … 2318 2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip); 2319 2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28); 2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW );2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0); 2321 2321 if (rcStrict != VINF_SUCCESS) 2322 2322 { … … 2437 2437 { 2438 2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX, 2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW );2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0); 2440 2440 if (rcStrict != VINF_SUCCESS) 2441 2441 { … … 3159 3159 RTPTRUNION uStackFrame; 3160 3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, 3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */ 3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), 3162 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */ 3162 3163 if (rcStrict != VINF_SUCCESS) 3163 3164 return rcStrict; … … 3261 3262 RTPTRUNION uStackFrame; 3262 3263 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate; 3263 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);3264 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp); 3264 3265 if (rcStrict != VINF_SUCCESS) 3265 3266 return rcStrict; … … 3512 3513 RTPTRUNION uStackFrame; 3513 3514 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX, 3514 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS ); /* _SYS is a hack ... 
*/3515 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */ 3515 3516 if (rcStrict != VINF_SUCCESS) 3516 3517 return rcStrict; … … 4134 4135 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) 4135 4136 { 4136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT , 0, 0);4137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0); 4137 4138 } 4138 4139 … … 5784 5785 * @returns VBox strict status code. 5785 5786 * 5786 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5787 * @param ppvMem Where to return the pointer to the mapped 5788 * memory. 5789 * @param cbMem The number of bytes to map. This is usually 1, 5790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by 5791 * string operations it can be up to a page. 5792 * @param iSegReg The index of the segment register to use for 5793 * this access. The base and limits are checked. 5794 * Use UINT8_MAX to indicate that no segmentation 5795 * is required (for IDT, GDT and LDT accesses). 5796 * @param GCPtrMem The address of the guest memory. 5797 * @param fAccess How the memory is being accessed. The 5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out 5799 * how to map the memory, while the 5800 * IEM_ACCESS_WHAT_XXX bit is used when raising 5801 * exceptions. 5802 */ 5803 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT 5787 * @param pVCpu The cross context virtual CPU structure of the calling thread. 5788 * @param ppvMem Where to return the pointer to the mapped memory. 5789 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6, 5790 * 8, 12, 16, 32 or 512. When used by string operations 5791 * it can be up to a page. 5792 * @param iSegReg The index of the segment register to use for this 5793 * access. The base and limits are checked. Use UINT8_MAX 5794 * to indicate that no segmentation is required (for IDT, 5795 * GDT and LDT accesses). 5796 * @param GCPtrMem The address of the guest memory. 5797 * @param fAccess How the memory is being accessed. The 5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map 5799 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used 5800 * when raising exceptions. 5801 * @param uAlignCtl Alignment control: 5802 * - Bits 15:0 is the alignment mask. 5803 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP, 5804 * IEM_MEMMAP_F_ALIGN_SSE, and 5805 * IEM_MEMMAP_F_ALIGN_GP_OR_AC. 5806 * Pass zero to skip alignment. 5807 */ 5808 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, 5809 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT 5804 5810 { 5805 5811 /* … … 5836 5842 else 5837 5843 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess); 5844 5845 /* 5846 * Alignment check. 5847 */ 5848 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 ) 5849 { /* likelyish */ } 5850 else 5851 { 5852 /* Misaligned access. 
*/ 5853 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS) 5854 { 5855 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP) 5856 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE) 5857 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) ) 5858 { 5859 AssertCompile(X86_CR0_AM == X86_EFL_AC); 5860 5861 if (iemMemAreAlignmentChecksEnabled(pVCpu)) 5862 return iemRaiseAlignmentCheckException(pVCpu); 5863 } 5864 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC) 5865 && iemMemAreAlignmentChecksEnabled(pVCpu) 5866 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU 5867 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */ 5868 ) 5869 return iemRaiseAlignmentCheckException(pVCpu); 5870 else 5871 return iemRaiseGeneralProtectionFault0(pVCpu); 5872 } 5873 } 5838 5874 5839 5875 #ifdef IEM_WITH_DATA_TLB … … 6072 6108 * @returns Pointer to the mapped memory. 6073 6109 * 6074 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6075 * @param cbMem The number of bytes to map. This is usually 1, 6076 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by 6077 * string operations it can be up to a page. 6078 * @param iSegReg The index of the segment register to use for 6079 * this access. The base and limits are checked. 6080 * Use UINT8_MAX to indicate that no segmentation 6081 * is required (for IDT, GDT and LDT accesses). 6082 * @param GCPtrMem The address of the guest memory. 6083 * @param fAccess How the memory is being accessed. The 6084 * IEM_ACCESS_TYPE_XXX bit is used to figure out 6085 * how to map the memory, while the 6086 * IEM_ACCESS_WHAT_XXX bit is used when raising 6087 * exceptions. 6088 */ 6089 void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT 6110 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6111 * @param cbMem The number of bytes to map. This is usually 1, 6112 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by 6113 * string operations it can be up to a page. 6114 * @param iSegReg The index of the segment register to use for 6115 * this access. The base and limits are checked. 6116 * Use UINT8_MAX to indicate that no segmentation 6117 * is required (for IDT, GDT and LDT accesses). 6118 * @param GCPtrMem The address of the guest memory. 6119 * @param fAccess How the memory is being accessed. The 6120 * IEM_ACCESS_TYPE_XXX bit is used to figure out 6121 * how to map the memory, while the 6122 * IEM_ACCESS_WHAT_XXX bit is used when raising 6123 * exceptions. 6124 * @param uAlignCtl Alignment control: 6125 * - Bits 15:0 is the alignment mask. 6126 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP, 6127 * IEM_MEMMAP_F_ALIGN_SSE, and 6128 * IEM_MEMMAP_F_ALIGN_GP_OR_AC. 6129 * Pass zero to skip alignment. 6130 */ 6131 void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess, 6132 uint32_t uAlignCtl) RT_NOEXCEPT 6090 6133 { 6091 6134 /* … … 6100 6143 if (rcStrict == VINF_SUCCESS) { /*likely*/ } 6101 6144 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 6145 6146 /* 6147 * Alignment check. 6148 */ 6149 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 ) 6150 { /* likelyish */ } 6151 else 6152 { 6153 /* Misaligned access. 
*/ 6154 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS) 6155 { 6156 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP) 6157 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE) 6158 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) ) 6159 { 6160 AssertCompile(X86_CR0_AM == X86_EFL_AC); 6161 6162 if (iemMemAreAlignmentChecksEnabled(pVCpu)) 6163 iemRaiseAlignmentCheckExceptionJmp(pVCpu); 6164 } 6165 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC) 6166 && iemMemAreAlignmentChecksEnabled(pVCpu) 6167 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU 6168 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */ 6169 ) 6170 iemRaiseAlignmentCheckExceptionJmp(pVCpu); 6171 else 6172 iemRaiseGeneralProtectionFault0Jmp(pVCpu); 6173 } 6174 } 6102 6175 6103 6176 /* … … 6450 6523 /* The lazy approach for now... */ 6451 6524 uint8_t const *pu8Src; 6452 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R );6525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0); 6453 6526 if (rc == VINF_SUCCESS) 6454 6527 { … … 6473 6546 { 6474 6547 /* The lazy approach for now... */ 6475 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R );6548 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0); 6476 6549 uint8_t const bRet = *pu8Src; 6477 6550 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R); … … 6495 6568 /* The lazy approach for now... */ 6496 6569 uint16_t const *pu16Src; 6497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, 6571 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1); 6498 6572 if (rc == VINF_SUCCESS) 6499 6573 { … … 6518 6592 { 6519 6593 /* The lazy approach for now... */ 6520 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6594 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 6595 sizeof(*pu16Src) - 1); 6521 6596 uint16_t const u16Ret = *pu16Src; 6522 6597 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R); … … 6540 6615 /* The lazy approach for now... */ 6541 6616 uint32_t const *pu32Src; 6542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, 6618 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1); 6543 6619 if (rc == VINF_SUCCESS) 6544 6620 { … … 6564 6640 /* The lazy approach for now... 
*/ 6565 6641 uint32_t const *pu32Src; 6566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, 6643 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1); 6567 6644 if (rc == VINF_SUCCESS) 6568 6645 { … … 6587 6664 uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT 6588 6665 { 6589 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6666 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 6667 sizeof(*pu32Src) - 1); 6590 6668 uint32_t const u32Ret = *pu32Src; 6591 6669 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R); … … 6657 6735 6658 6736 # else 6659 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6737 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, 6738 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1); 6660 6739 uint32_t const u32Ret = *pu32Src; 6661 6740 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R); … … 6681 6760 /* The lazy approach for now... */ 6682 6761 int32_t const *pi32Src; 6683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6762 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, 6763 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1); 6684 6764 if (rc == VINF_SUCCESS) 6685 6765 { … … 6710 6790 /* The lazy approach for now... */ 6711 6791 uint64_t const *pu64Src; 6712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6792 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, 6793 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1); 6713 6794 if (rc == VINF_SUCCESS) 6714 6795 { … … 6733 6814 { 6734 6815 /* The lazy approach for now... */ 6735 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6816 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, 6817 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1); 6736 6818 uint64_t const u64Ret = *pu64Src; 6737 6819 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); … … 6754 6836 { 6755 6837 /* The lazy approach for now... */ 6756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */6757 if (RT_UNLIKELY(GCPtrMem & 15))6758 return iemRaiseGeneralProtectionFault0(pVCpu);6759 6760 6838 uint64_t const *pu64Src; 6761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6839 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, 6840 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 6762 6841 if (rc == VINF_SUCCESS) 6763 6842 { … … 6782 6861 { 6783 6862 /* The lazy approach for now... */ 6784 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. 
*/ 6785 if (RT_LIKELY(!(GCPtrMem & 15))) 6786 { 6787 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6788 uint64_t const u64Ret = *pu64Src; 6789 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 6790 return u64Ret; 6791 } 6792 6793 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu); 6794 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc)); 6863 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 6864 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 6865 uint64_t const u64Ret = *pu64Src; 6866 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R); 6867 return u64Ret; 6795 6868 } 6796 6869 #endif … … 6811 6884 /* The lazy approach for now... */ 6812 6885 PCRTFLOAT80U pr80Src; 6813 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, 6887 IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */ ); 6814 6888 if (rc == VINF_SUCCESS) 6815 6889 { … … 6834 6908 { 6835 6909 /* The lazy approach for now... */ 6836 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6910 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, 6911 IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */); 6837 6912 *pr80Dst = *pr80Src; 6838 6913 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R); … … 6855 6930 /* The lazy approach for now... */ 6856 6931 PCRTPBCD80U pd80Src; 6857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, 6933 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */); 6858 6934 if (rc == VINF_SUCCESS) 6859 6935 { … … 6878 6954 { 6879 6955 /* The lazy approach for now... */ 6880 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6956 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, 6957 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */); 6881 6958 *pd80Dst = *pd80Src; 6882 6959 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R); … … 6899 6976 /* The lazy approach for now... */ 6900 6977 PCRTUINT128U pu128Src; 6901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6978 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, 6979 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 6902 6980 if (rc == VINF_SUCCESS) 6903 6981 { … … 6923 7001 { 6924 7002 /* The lazy approach for now... */ 6925 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7003 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, 7004 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 6926 7005 pu128Dst->au64[0] = pu128Src->au64[0]; 6927 7006 pu128Dst->au64[1] = pu128Src->au64[1]; … … 6947 7026 { 6948 7027 /* The lazy approach for now... */ 6949 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. 
*/6950 if ( (GCPtrMem & 15)6951 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */6952 return iemRaiseGeneralProtectionFault0(pVCpu);6953 6954 7028 PCRTUINT128U pu128Src; 6955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7029 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, 7030 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 6956 7031 if (rc == VINF_SUCCESS) 6957 7032 { … … 6980 7055 { 6981 7056 /* The lazy approach for now... */ 6982 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */ 6983 if ( (GCPtrMem & 15) == 0 6984 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ 6985 { 6986 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 6987 pu128Dst->au64[0] = pu128Src->au64[0]; 6988 pu128Dst->au64[1] = pu128Src->au64[1]; 6989 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 6990 return; 6991 } 6992 6993 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 6994 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7057 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7058 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7059 pu128Dst->au64[0] = pu128Src->au64[0]; 7060 pu128Dst->au64[1] = pu128Src->au64[1]; 7061 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R); 6995 7062 } 6996 7063 #endif … … 7011 7078 /* The lazy approach for now... */ 7012 7079 PCRTUINT256U pu256Src; 7013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7080 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, 7081 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 7014 7082 if (rc == VINF_SUCCESS) 7015 7083 { … … 7037 7105 { 7038 7106 /* The lazy approach for now... */ 7039 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7107 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, 7108 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 7040 7109 pu256Dst->au64[0] = pu256Src->au64[0]; 7041 7110 pu256Dst->au64[1] = pu256Src->au64[1]; … … 7063 7132 { 7064 7133 /* The lazy approach for now... */ 7065 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */7066 if (GCPtrMem & 31)7067 return iemRaiseGeneralProtectionFault0(pVCpu);7068 7069 7134 PCRTUINT256U pu256Src; 7070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, 7136 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7071 7137 if (rc == VINF_SUCCESS) 7072 7138 { … … 7097 7163 { 7098 7164 /* The lazy approach for now... */ 7099 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. 
*/ 7100 if ((GCPtrMem & 31) == 0) 7101 { 7102 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R); 7103 pu256Dst->au64[0] = pu256Src->au64[0]; 7104 pu256Dst->au64[1] = pu256Src->au64[1]; 7105 pu256Dst->au64[2] = pu256Src->au64[2]; 7106 pu256Dst->au64[3] = pu256Src->au64[3]; 7107 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R); 7108 return; 7109 } 7110 7111 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 7112 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7165 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7166 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7167 pu256Dst->au64[0] = pu256Src->au64[0]; 7168 pu256Dst->au64[1] = pu256Src->au64[1]; 7169 pu256Dst->au64[2] = pu256Src->au64[2]; 7170 pu256Dst->au64[3] = pu256Src->au64[3]; 7171 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R); 7113 7172 } 7114 7173 #endif … … 7202 7261 /* The lazy approach for now... */ 7203 7262 uint8_t *pu8Dst; 7204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W );7263 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0); 7205 7264 if (rc == VINF_SUCCESS) 7206 7265 { … … 7225 7284 { 7226 7285 /* The lazy approach for now... */ 7227 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W );7286 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0); 7228 7287 *pu8Dst = u8Value; 7229 7288 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W); … … 7246 7305 /* The lazy approach for now... */ 7247 7306 uint16_t *pu16Dst; 7248 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7307 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, 7308 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1); 7249 7309 if (rc == VINF_SUCCESS) 7250 7310 { … … 7269 7329 { 7270 7330 /* The lazy approach for now... */ 7271 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7331 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, 7332 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1); 7272 7333 *pu16Dst = u16Value; 7273 7334 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W); … … 7290 7351 /* The lazy approach for now... */ 7291 7352 uint32_t *pu32Dst; 7292 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, 7354 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1); 7293 7355 if (rc == VINF_SUCCESS) 7294 7356 { … … 7314 7376 { 7315 7377 /* The lazy approach for now... */ 7316 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7378 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, 7379 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1); 7317 7380 *pu32Dst = u32Value; 7318 7381 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W); … … 7335 7398 /* The lazy approach for now... 
*/ 7336 7399 uint64_t *pu64Dst; 7337 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, 7401 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1); 7338 7402 if (rc == VINF_SUCCESS) 7339 7403 { … … 7358 7422 { 7359 7423 /* The lazy approach for now... */ 7360 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7424 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, 7425 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1); 7361 7426 *pu64Dst = u64Value; 7362 7427 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W); … … 7379 7444 /* The lazy approach for now... */ 7380 7445 PRTUINT128U pu128Dst; 7381 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7446 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, 7447 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7382 7448 if (rc == VINF_SUCCESS) 7383 7449 { … … 7403 7469 { 7404 7470 /* The lazy approach for now... */ 7405 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7471 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, 7472 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7406 7473 pu128Dst->au64[0] = u128Value.au64[0]; 7407 7474 pu128Dst->au64[1] = u128Value.au64[1]; … … 7424 7491 { 7425 7492 /* The lazy approach for now... */ 7426 if ( (GCPtrMem & 15)7427 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */7428 return iemRaiseGeneralProtectionFault0(pVCpu);7429 7430 7493 PRTUINT128U pu128Dst; 7431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7494 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7495 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7432 7496 if (rc == VINF_SUCCESS) 7433 7497 { … … 7454 7518 { 7455 7519 /* The lazy approach for now... */ 7456 if ( (GCPtrMem & 15) == 0 7457 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */ 7458 { 7459 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7460 pu128Dst->au64[0] = u128Value.au64[0]; 7461 pu128Dst->au64[1] = u128Value.au64[1]; 7462 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 7463 return; 7464 } 7465 7466 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 7467 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7520 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7521 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7522 pu128Dst->au64[0] = u128Value.au64[0]; 7523 pu128Dst->au64[1] = u128Value.au64[1]; 7524 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W); 7468 7525 } 7469 7526 #endif … … 7484 7541 /* The lazy approach for now... 
*/ 7485 7542 PRTUINT256U pu256Dst; 7486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7543 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7544 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7487 7545 if (rc == VINF_SUCCESS) 7488 7546 { … … 7510 7568 { 7511 7569 /* The lazy approach for now... */ 7512 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7570 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7571 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7513 7572 pu256Dst->au64[0] = pu256Value->au64[0]; 7514 7573 pu256Dst->au64[1] = pu256Value->au64[1]; … … 7521 7580 7522 7581 /** 7523 * Stores a data dqword, AVX aligned.7582 * Stores a data dqword, AVX \#GP(0) aligned. 7524 7583 * 7525 7584 * @returns Strict VBox status code. … … 7533 7592 { 7534 7593 /* The lazy approach for now... */ 7535 if (GCPtrMem & 31)7536 return iemRaiseGeneralProtectionFault0(pVCpu);7537 7538 7594 PRTUINT256U pu256Dst; 7539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7595 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7596 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP); 7540 7597 if (rc == VINF_SUCCESS) 7541 7598 { … … 7564 7621 { 7565 7622 /* The lazy approach for now... */ 7566 if ((GCPtrMem & 31) == 0) 7567 { 7568 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W); 7569 pu256Dst->au64[0] = pu256Value->au64[0]; 7570 pu256Dst->au64[1] = pu256Value->au64[1]; 7571 pu256Dst->au64[2] = pu256Value->au64[2]; 7572 pu256Dst->au64[3] = pu256Value->au64[3]; 7573 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W); 7574 return; 7575 } 7576 7577 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu); 7578 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)); 7623 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7624 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP); 7625 pu256Dst->au64[0] = pu256Value->au64[0]; 7626 pu256Dst->au64[1] = pu256Value->au64[1]; 7627 pu256Dst->au64[2] = pu256Value->au64[2]; 7628 pu256Dst->au64[3] = pu256Value->au64[3]; 7629 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W); 7579 7630 } 7580 7631 #endif … … 7596 7647 /* 7597 7648 * The SIDT and SGDT instructions actually stores the data using two 7598 * independent writes. The instructions does not respond to opsize prefixes. 7649 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions 7650 * does not respond to opsize prefixes. 7599 7651 */ 7600 7652 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit); … … 7629 7681 /* Write the word the lazy way. */ 7630 7682 uint16_t *pu16Dst; 7631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, 7684 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1); 7632 7685 if (rc == VINF_SUCCESS) 7633 7686 { … … 7659 7712 /* Write the dword the lazy way. 
*/ 7660 7713 uint32_t *pu32Dst; 7661 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, 7715 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1); 7662 7716 if (rc == VINF_SUCCESS) 7663 7717 { … … 7698 7752 * ancient hardware when it actually did change. */ 7699 7753 uint16_t *pu16Dst; 7700 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW); 7754 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, 7755 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */ 7701 7756 if (rc == VINF_SUCCESS) 7702 7757 { … … 7728 7783 /* Write the word the lazy way. */ 7729 7784 uint64_t *pu64Dst; 7730 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7785 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, 7786 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1); 7731 7787 if (rc == VINF_SUCCESS) 7732 7788 { … … 7758 7814 /* Write the word the lazy way. */ 7759 7815 uint16_t const *pu16Src; 7760 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 7816 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, 7817 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1); 7761 7818 if (rc == VINF_SUCCESS) 7762 7819 { … … 7788 7845 /* Write the word the lazy way. */ 7789 7846 uint32_t const *pu32Src; 7790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 7847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, 7848 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1); 7791 7849 if (rc == VINF_SUCCESS) 7792 7850 { … … 7818 7876 /* Write the word the lazy way. */ 7819 7877 uint64_t const *pu64Src; 7820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 7878 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, 7879 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1); 7821 7880 if (rc == VINF_SUCCESS) 7822 7881 { … … 7849 7908 /* Write the word the lazy way. */ 7850 7909 uint16_t *pu16Dst; 7851 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, 7911 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1); 7852 7912 if (rc == VINF_SUCCESS) 7853 7913 { … … 7880 7940 /* Write the word the lazy way. */ 7881 7941 uint32_t *pu32Dst; 7882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, 7943 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1); 7883 7944 if (rc == VINF_SUCCESS) 7884 7945 { … … 7911 7972 /* Write the word the lazy way. 
*/ 7912 7973 uint64_t *pu64Dst; 7913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 7974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, 7975 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1); 7914 7976 if (rc == VINF_SUCCESS) 7915 7977 { … … 7942 8004 /* Write the word the lazy way. */ 7943 8005 uint16_t const *pu16Src; 7944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 8006 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, 8007 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1); 7945 8008 if (rc == VINF_SUCCESS) 7946 8009 { … … 7973 8036 /* Write the word the lazy way. */ 7974 8037 uint32_t const *pu32Src; 7975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 8038 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, 8039 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1); 7976 8040 if (rc == VINF_SUCCESS) 7977 8041 { … … 8004 8068 /* Write the word the lazy way. */ 8005 8069 uint64_t const *pu64Src; 8006 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 8070 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, 8071 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1); 8007 8072 if (rcStrict == VINF_SUCCESS) 8008 8073 { … … 8027 8092 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8028 8093 * @param cbMem The number of bytes to push onto the stack. 8094 * @param cbAlign The alignment mask (7, 3, 1). 8029 8095 * @param ppvMem Where to return the pointer to the stack memory. 8030 8096 * As with the other memory functions this could be … … 8036 8102 * iemMemStackPushCommitSpecial(). 8037 8103 */ 8038 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT 8104 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 8105 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT 8039 8106 { 8040 8107 Assert(cbMem < UINT8_MAX); 8041 8108 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp); 8042 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 8109 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, 8110 IEM_ACCESS_STACK_W, cbAlign); 8043 8111 } 8044 8112 … … 8067 8135 /** 8068 8136 * Begin a special stack pop (used by iret, retf and such). 8137 * 8138 * This will raise \#SS or \#PF if appropriate. 8139 * 8140 * @returns Strict VBox status code. 8141 * @param pVCpu The cross context virtual CPU structure of the calling thread. 8142 * @param cbMem The number of bytes to pop from the stack. 8143 * @param cbAlign The alignment mask (7, 3, 1). 8144 * @param ppvMem Where to return the pointer to the stack memory. 8145 * @param puNewRsp Where to return the new RSP value. This must be 8146 * assigned to CPUMCTX::rsp manually some time 8147 * after iemMemStackPopDoneSpecial() has been 8148 * called. 
8149 */ 8150 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 8151 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT 8152 { 8153 Assert(cbMem < UINT8_MAX); 8154 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp); 8155 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign); 8156 } 8157 8158 8159 /** 8160 * Continue a special stack pop (used by iret and retf). 8069 8161 * 8070 8162 * This will raise \#SS or \#PF if appropriate. … … 8079 8171 * called. 8080 8172 */ 8081 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT8082 {8083 Assert(cbMem < UINT8_MAX);8084 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);8085 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);8086 }8087 8088 8089 /**8090 * Continue a special stack pop (used by iret and retf).8091 *8092 * This will raise \#SS or \#PF if appropriate.8093 *8094 * @returns Strict VBox status code.8095 * @param pVCpu The cross context virtual CPU structure of the calling thread.8096 * @param cbMem The number of bytes to pop from the stack.8097 * @param ppvMem Where to return the pointer to the stack memory.8098 * @param puNewRsp Where to return the new RSP value. This must be8099 * assigned to CPUMCTX::rsp manually some time8100 * after iemMemStackPopDoneSpecial() has been8101 * called.8102 */8103 8173 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT 8104 8174 { … … 8108 8178 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8); 8109 8179 *puNewRsp = NewRsp.u; 8110 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R); 8180 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, 8181 0 /* checked in iemMemStackPopBeginSpecial */); 8111 8182 } 8112 8183 … … 8144 8215 /* The lazy approach for now... */ 8145 8216 uint8_t const *pbSrc; 8146 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R );8217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 8147 8218 if (rc == VINF_SUCCESS) 8148 8219 { … … 8168 8239 /* The lazy approach for now... */ 8169 8240 uint16_t const *pu16Src; 8170 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R );8241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 8171 8242 if (rc == VINF_SUCCESS) 8172 8243 { … … 8192 8263 /* The lazy approach for now... */ 8193 8264 uint32_t const *pu32Src; 8194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R );8265 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 8195 8266 if (rc == VINF_SUCCESS) 8196 8267 { … … 8216 8287 /* The lazy approach for now... */ 8217 8288 uint64_t const *pu64Src; 8218 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R );8289 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 8219 8290 if (rc == VINF_SUCCESS) 8220 8291 { … … 8356 8427 /* The normal case, map the 32-bit bits around the accessed bit (40). 
*/ 8357 8428 GCPtr += 2 + 2; 8358 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW );8429 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0); 8359 8430 if (rcStrict != VINF_SUCCESS) 8360 8431 return rcStrict; … … 8364 8435 { 8365 8436 /* The misaligned GDT/LDT case, map the whole thing. */ 8366 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW );8437 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0); 8367 8438 if (rcStrict != VINF_SUCCESS) 8368 8439 return rcStrict; -
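The core of this changeset is in the IEMAll.cpp hunks above: iemMemMap and iemMemMapJmp gain a uAlignCtl argument whose bits 15:0 carry an alignment mask and whose bits 31:16 carry the IEM_MEMMAP_F_ALIGN_* flags, with the whole check skipped for IEM_ACCESS_WHAT_SYS accesses. The following is a minimal standalone sketch of that decision logic, not VirtualBox code; the ExcKind enum, the CheckAlignment name and the plain booleans standing in for pVCpu state are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flag bits as introduced in IEMInternal.h by this changeset. */
#define IEM_MEMMAP_F_ALIGN_GP        (UINT32_C(1) << 16)
#define IEM_MEMMAP_F_ALIGN_SSE       (UINT32_C(1) << 17)
#define IEM_MEMMAP_F_ALIGN_GP_OR_AC  (UINT32_C(1) << 18)

typedef enum { EXC_NONE, EXC_GP0, EXC_AC } ExcKind;

/*
 * Models the new alignment check in iemMemMap: fAcEnabled stands in for
 * iemMemAreAlignmentChecksEnabled() (CPL==3 && CR0.AM && EFLAGS.AC),
 * fMxcsrMM for MXCSR.MM, and fWhatSys for IEM_ACCESS_WHAT_SYS accesses.
 */
static ExcKind CheckAlignment(uint64_t GCPtrMem, uint32_t uAlignCtl,
                              bool fWhatSys, bool fAcEnabled, bool fMxcsrMM)
{
    if ((GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0)
        return EXC_NONE;                            /* aligned, nothing to do */
    if (fWhatSys)
        return EXC_NONE;                            /* system accesses are exempt */
    if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
        || ((uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE) && fMxcsrMM))
        return fAcEnabled ? EXC_AC : EXC_NONE;      /* plain #AC semantics */
    if ((uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC) && fAcEnabled)
        return EXC_AC;                              /* FXSAVE/FXRSTOR style */
    return EXC_GP0;                                 /* otherwise #GP(0) */
}

int main(void)
{
    /* MOVDQA-style access, 16-byte mask: #GP(0) unless MXCSR.MM turns it into #AC. */
    uint32_t const fSse = 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
    printf("%d\n", CheckAlignment(0x1008, fSse, false, false, false)); /* 1 = EXC_GP0 */
    printf("%d\n", CheckAlignment(0x1008, fSse, false, true,  true));  /* 2 = EXC_AC  */
    /* Plain dword data access, mask 3: only #AC, and only when it is armed. */
    printf("%d\n", CheckAlignment(0x1002, 3, false, true, false));     /* 2 = EXC_AC  */
    return 0;
}

A caller that wants no alignment handling at all simply passes 0, which is what the converted IEM_ACCESS_SYS_* call sites above do.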
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r95403 r95410 325 325 { 326 326 uint16_t const *pa16Mem = NULL; 327 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R );327 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1); 328 328 if (rcStrict == VINF_SUCCESS) 329 329 { … … 405 405 { 406 406 uint32_t const *pa32Mem; 407 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R );407 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1); 408 408 if (rcStrict == VINF_SUCCESS) 409 409 { … … 476 476 GCPtrBottom--; 477 477 uint16_t *pa16Mem = NULL; 478 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W );478 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1); 479 479 if (rcStrict == VINF_SUCCESS) 480 480 { … … 547 547 GCPtrBottom--; 548 548 uint32_t *pa32Mem; 549 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W );549 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1); 550 550 if (rcStrict == VINF_SUCCESS) 551 551 { … … 1327 1327 1328 1328 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack; 1329 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R );1329 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0); 1330 1330 if (rcStrict != VINF_SUCCESS) 1331 1331 { … … 1456 1456 void *pvNewFrame; 1457 1457 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack; 1458 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW );1458 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0); 1459 1459 if (rcStrict != VINF_SUCCESS) 1460 1460 { … … 1477 1477 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID; 1478 1478 pVCpu->cpum.GstCtx.rsp = uNewRsp; 1479 pVCpu->iem.s.uCpl = uNewCSDpl; 1479 pVCpu->iem.s.uCpl = uNewCSDpl; /** @todo is the parameter words accessed using the new CPL or the old CPL? */ 1480 1480 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss)); 1481 1481 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS); … … 1484 1484 /** @todo this can still fail due to SS.LIMIT not check. */ 1485 1485 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack, 1486 IEM_IS_LONG_MODE(pVCpu) ? 7 1487 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1, 1486 1488 &uPtrRet.pv, &uNewRsp); 1487 1489 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)), … … 1492 1494 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE) 1493 1495 { 1494 /* Push the old CS:rIP. */1495 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;1496 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */1497 1498 1496 if (cbWords) 1499 1497 { 1500 1498 /* Map the relevant chunk of the old stack. */ 1501 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R); 1499 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, 1500 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! 
*/); 1502 1501 if (rcStrict != VINF_SUCCESS) 1503 1502 { … … 1519 1518 } 1520 1519 1520 /* Push the old CS:rIP. */ 1521 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr; 1522 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */ 1523 1521 1524 /* Push the old SS:rSP. */ 1522 1525 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp; … … 1527 1530 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE); 1528 1531 1529 /* Push the old CS:rIP. */1530 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;1531 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;1532 1533 1532 if (cbWords) 1534 1533 { 1535 1534 /* Map the relevant chunk of the old stack. */ 1536 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R); 1535 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, 1536 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */); 1537 1537 if (rcStrict != VINF_SUCCESS) 1538 1538 { … … 1553 1553 } 1554 1554 } 1555 1556 /* Push the old CS:rIP. */ 1557 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr; 1558 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel; 1555 1559 1556 1560 /* Push the old SS:rSP. */ … … 1640 1644 IEM_IS_LONG_MODE(pVCpu) ? 8+8 1641 1645 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2, 1646 IEM_IS_LONG_MODE(pVCpu) ? 7 1647 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2, 1642 1648 &uPtrRet.pv, &uNewRsp); 1643 1649 if (rcStrict != VINF_SUCCESS) … … 1998 2004 /* Check stack first - may #SS(0). */ 1999 2005 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2, 2006 enmEffOpSize == IEMMODE_32BIT ? 3 : 1, 2000 2007 &uPtrRet.pv, &uNewRsp); 2001 2008 if (rcStrict != VINF_SUCCESS) … … 2104 2111 * 16-bit code cause a two or four byte CS to be pushed? */ 2105 2112 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 2106 enmEffOpSize == IEMMODE_64BIT ? 8+82107 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,2113 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2, 2114 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1, 2108 2115 &uPtrRet.pv, &uNewRsp); 2109 2116 if (rcStrict != VINF_SUCCESS) … … 2217 2224 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2 2218 2225 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8; 2219 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp); 2226 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, 2227 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 
3 : 7, 2228 &uPtrFrame.pv, &uNewRsp); 2220 2229 if (rcStrict != VINF_SUCCESS) 2221 2230 return rcStrict; … … 2926 2935 if (enmEffOpSize == IEMMODE_32BIT) 2927 2936 { 2928 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);2937 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp); 2929 2938 if (rcStrict != VINF_SUCCESS) 2930 2939 return rcStrict; … … 2945 2954 else 2946 2955 { 2947 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);2956 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp); 2948 2957 if (rcStrict != VINF_SUCCESS) 2949 2958 return rcStrict; … … 3210 3219 if (enmEffOpSize == IEMMODE_32BIT) 3211 3220 { 3212 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);3221 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp); 3213 3222 if (rcStrict != VINF_SUCCESS) 3214 3223 return rcStrict; … … 3219 3228 else 3220 3229 { 3221 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);3230 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp); 3222 3231 if (rcStrict != VINF_SUCCESS) 3223 3232 return rcStrict; … … 3566 3575 if (enmEffOpSize == IEMMODE_64BIT) 3567 3576 { 3568 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);3577 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp); 3569 3578 if (rcStrict != VINF_SUCCESS) 3570 3579 return rcStrict; … … 3577 3586 else if (enmEffOpSize == IEMMODE_32BIT) 3578 3587 { 3579 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);3588 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp); 3580 3589 if (rcStrict != VINF_SUCCESS) 3581 3590 return rcStrict; … … 3589 3598 { 3590 3599 Assert(enmEffOpSize == IEMMODE_16BIT); 3591 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);3600 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp); 3592 3601 if (rcStrict != VINF_SUCCESS) 3593 3602 return rcStrict; … … 3955 3964 uint8_t const *pa8Mem; 3956 3965 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */ 3957 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R );3966 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0); 3958 3967 if (rcStrict != VINF_SUCCESS) 3959 3968 return rcStrict; … … 5453 5462 */ 5454 5463 void *pvDesc; 5455 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW); 5464 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), 5465 IEM_ACCESS_DATA_RW, 0); 5456 5466 if (rcStrict != VINF_SUCCESS) 5457 5467 return rcStrict; … … 8316 8326 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM)) 8317 8327 return iemRaiseDeviceNotAvailable(pVCpu); 8318 if (GCPtrEff & 15)8319 {8320 /** @todo CPU/VM detection possible! \#AC might not be signal for8321 * all/any misalignment sizes, intel says its an implementation detail. 
*/8322 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)8323 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC8324 && pVCpu->iem.s.uCpl == 3)8325 return iemRaiseAlignmentCheckException(pVCpu);8326 return iemRaiseGeneralProtectionFault0(pVCpu);8327 }8328 8328 8329 8329 /* … … 8331 8331 */ 8332 8332 void *pvMem512; 8333 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 8333 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 8334 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8334 8335 if (rcStrict != VINF_SUCCESS) 8335 8336 return rcStrict; … … 8419 8420 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM)) 8420 8421 return iemRaiseDeviceNotAvailable(pVCpu); 8421 if (GCPtrEff & 15)8422 {8423 /** @todo CPU/VM detection possible! \#AC might not be signal for8424 * all/any misalignment sizes, intel says its an implementation detail. */8425 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)8426 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC8427 && pVCpu->iem.s.uCpl == 3)8428 return iemRaiseAlignmentCheckException(pVCpu);8429 return iemRaiseGeneralProtectionFault0(pVCpu);8430 }8431 8422 8432 8423 /* … … 8434 8425 */ 8435 8426 void *pvMem512; 8436 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R); 8427 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 8428 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8437 8429 if (rcStrict != VINF_SUCCESS) 8438 8430 return rcStrict; … … 8540 8532 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) 8541 8533 return iemRaiseDeviceNotAvailable(pVCpu); 8542 if (GCPtrEff & 63)8543 {8544 /** @todo CPU/VM detection possible! \#AC might not be signal for8545 * all/any misalignment sizes, intel says its an implementation detail. */8546 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)8547 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC8548 && pVCpu->iem.s.uCpl == 3)8549 return iemRaiseAlignmentCheckException(pVCpu);8550 return iemRaiseGeneralProtectionFault0(pVCpu);8551 }8552 8534 8553 8535 /* … … 8568 8550 /* The x87+SSE state. */ 8569 8551 void *pvMem512; 8570 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 8552 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 8553 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8571 8554 if (rcStrict != VINF_SUCCESS) 8572 8555 return rcStrict; … … 8576 8559 /* The header. */ 8577 8560 PX86XSAVEHDR pHdr; 8578 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW );8561 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */); 8579 8562 if (rcStrict != VINF_SUCCESS) 8580 8563 return rcStrict; … … 8648 8631 PX86XSAVEYMMHI pCompDst; 8649 8632 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 8650 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE );8633 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */); 8651 8634 if (rcStrict != VINF_SUCCESS) 8652 8635 return rcStrict; … … 8722 8705 /* The x87+SSE state. 
*/ 8723 8706 void *pvMem512; 8724 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R); 8707 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 8708 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8725 8709 if (rcStrict != VINF_SUCCESS) 8726 8710 return rcStrict; … … 8733 8717 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr; 8734 8718 PCX86XSAVEHDR pHdrSrc; 8735 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R); 8719 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, 8720 IEM_ACCESS_DATA_R, 0 /* checked above */); 8736 8721 if (rcStrict != VINF_SUCCESS) 8737 8722 return rcStrict; … … 8857 8842 PCX86XSAVEYMMHI pCompSrc; 8858 8843 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst), 8859 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R); 8844 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 8845 IEM_ACCESS_DATA_R, 0 /* checked above */); 8860 8846 if (rcStrict != VINF_SUCCESS) 8861 8847 return rcStrict; … … 9156 9142 RTPTRUNION uPtr; 9157 9143 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 9158 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE); 9144 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 9145 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */); 9159 9146 if (rcStrict != VINF_SUCCESS) 9160 9147 return rcStrict; … … 9185 9172 RTPTRUNION uPtr; 9186 9173 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108, 9187 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE );9174 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */); 9188 9175 if (rcStrict != VINF_SUCCESS) 9189 9176 return rcStrict; … … 9235 9222 RTCPTRUNION uPtr; 9236 9223 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 9237 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R); 9224 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 9225 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/); 9238 9226 if (rcStrict != VINF_SUCCESS) 9239 9227 return rcStrict; … … 9262 9250 RTCPTRUNION uPtr; 9263 9251 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108, 9264 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R );9252 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ ); 9265 9253 if (rcStrict != VINF_SUCCESS) 9266 9254 return rcStrict; -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r94768 r95410 1231 1231 1232 1232 OP_TYPE *puMem; 1233 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W); 1233 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, 1234 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1); 1234 1235 if (rcStrict != VINF_SUCCESS) 1235 1236 return rcStrict; … … 1421 1422 { 1422 1423 OP_TYPE *puMem; 1423 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W); 1424 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, 1425 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1); 1424 1426 if (rcStrict != VINF_SUCCESS) 1425 1427 return rcStrict; -
trunk/src/VBox/VMM/include/IEMInline.h
r94838 r95410 2198 2198 */ 2199 2199 2200 2201 /** 2202 * Checks whether alignment checks are enabled or not. 2203 * 2204 * @returns true if enabled, false if not. 2205 * @param pVCpu The cross context virtual CPU structure of the calling thread. 2206 */ 2207 DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) 2208 { 2209 AssertCompile(X86_CR0_AM == X86_EFL_AC); 2210 return pVCpu->iem.s.uCpl == 3 2211 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM); 2212 } 2213 2200 2214 /** 2201 2215 * Checks if the given segment can be written to, raise the appropriate -
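The new iemMemAreAlignmentChecksEnabled helper added above relies on X86_CR0_AM and X86_EFL_AC occupying the same bit position (bit 18), which is exactly what its AssertCompile guards, so a single AND of CR0 with EFLAGS tests both gates at once. A small standalone illustration; only the architectural bit position is assumed, and the function and parameter names are stand-ins:

#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_AM  (UINT32_C(1) << 18)   /* CR0.AM - alignment mask enable  */
#define X86_EFL_AC  (UINT32_C(1) << 18)   /* EFLAGS.AC - alignment check flag */

/* #AC is only delivered to ring-3 code running with both CR0.AM and EFLAGS.AC set. */
static bool AreAlignmentChecksEnabled(uint8_t uCpl, uint32_t uCr0, uint32_t fEfl)
{
    return uCpl == 3
        && ((uCr0 & fEfl) & X86_CR0_AM) != 0;
}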
trunk/src/VBox/VMM/include/IEMInternal.h
r95403 r95410 2811 2811 /** @name Memory access. 2812 2812 * @{ */ 2813 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT; 2813 2814 /** Report a \#GP instead of \#AC and do not restrict to ring-3 */ 2815 #define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16) 2816 /** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1 2817 * when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */ 2818 #define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17) 2819 /** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP. 2820 * Users include FXSAVE & FXRSTOR. */ 2821 #define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18) 2822 2823 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, 2824 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT; 2814 2825 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT; 2815 2826 #ifndef IN_RING3 … … 2899 2910 #endif 2900 2911 2901 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT; 2912 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 2913 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT; 2902 2914 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT; 2903 2915 VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT; … … 2908 2920 VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT; 2909 2921 VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT; 2910 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT; 2922 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 2923 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT; 2911 2924 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT; 2912 2925 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT; -
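The three flag bits documented above are combined with the low 16-bit mask into a single uAlignCtl word. As a rough summary of the combinations this changeset actually passes to iemMemMap/iemMemMapJmp (the g_* constant names below are illustrative only and do not exist in the sources):

#include <stdint.h>

#define IEM_MEMMAP_F_ALIGN_GP        (UINT32_C(1) << 16)
#define IEM_MEMMAP_F_ALIGN_SSE       (UINT32_C(1) << 17)
#define IEM_MEMMAP_F_ALIGN_GP_OR_AC  (UINT32_C(1) << 18)

/* Natural alignment, #AC only (and only when CPL 3 + CR0.AM + EFLAGS.AC): */
static uint32_t const g_uAlignData16  = 2 - 1;      /* word data and stack slots  */
static uint32_t const g_uAlignData64  = 8 - 1;      /* qword data and stack slots */
/* Aligned SSE accesses: #GP(0), behaving like #AC when MXCSR.MM is set: */
static uint32_t const g_uAlignSse128  = 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
static uint32_t const g_uAlignSse256  = 31 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE;
/* AVX 256-bit aligned store, plain #GP(0): */
static uint32_t const g_uAlignAvx256  = 31 | IEM_MEMMAP_F_ALIGN_GP;
/* FXSAVE/FXRSTOR (16-byte) and XSAVE/XRSTOR (64-byte): #AC when armed, else #GP(0): */
static uint32_t const g_uAlignFxSave  = 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC;
static uint32_t const g_uAlignXSave   = 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC;
/* Pass 0 to skip the check entirely (the NoAc variants and descriptor/system accesses). */
static uint32_t const g_uAlignNone    = 0;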
trunk/src/VBox/VMM/include/IEMMc.h
r95403 r95410 883 883 */ 884 884 #define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \ 885 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess))) 885 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \ 886 (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1)) 886 887 887 888 /** Maps guest memory for direct or bounce buffered access. … … 890 891 */ 891 892 #define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \ 892 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess))) 893 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \ 894 (a_GCPtrMem), (a_fAccess), (a_cbMem) - 1)) 893 895 894 896 /** Commits the memory and unmaps the guest memory.