VirtualBox

Changeset 95410 in vbox


Timestamp:
Jun 28, 2022 6:33:26 PM (3 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
152012
Message:

VMM/IEM: Alignment checks (#AC(0)/#GP(0)). bugref:9898
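
In short: iemMemMap and iemMemMapJmp grow a uAlignCtl parameter (alignment mask in bits 15:0, IEM_MEMMAP_F_ALIGN_* flags in bits 31:16), and the inline alignment pre-checks at the SSE/AVX call sites are replaced by one central check that decides between #AC(0) and #GP(0). As a rough standalone sketch of that decision (the flag names come from this changeset, but their bit values and the helper below are illustrative only, not the actual VirtualBox code):

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder bit values; the real ones live in the IEM headers. */
    #define IEM_MEMMAP_F_ALIGN_GP        UINT32_C(0x00010000)
    #define IEM_MEMMAP_F_ALIGN_SSE       UINT32_C(0x00020000)
    #define IEM_MEMMAP_F_ALIGN_GP_OR_AC  UINT32_C(0x00040000)

    typedef enum { RAISE_NONE, RAISE_AC, RAISE_GP } ALIGNVERDICT;

    /* fAcEnabled stands in for iemMemAreAlignmentChecksEnabled() (roughly
       CR0.AM && EFLAGS.AC && CPL == 3); fMxcsrMm stands in for MXCSR.MM. */
    static ALIGNVERDICT alignVerdict(uint64_t GCPtrMem, uint32_t uAlignCtl,
                                     bool fAcEnabled, bool fMxcsrMm)
    {
        if (!(GCPtrMem & (uAlignCtl & UINT16_MAX)))
            return RAISE_NONE;                         /* aligned: no fault */
        if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
            || ((uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE) && fMxcsrMm))
            return fAcEnabled ? RAISE_AC : RAISE_NONE; /* ordinary data access */
        if ((uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC) && fAcEnabled)
            return RAISE_AC;                           /* FXSAVE-style accesses */
        return RAISE_GP;                               /* strict alignment: #GP(0) */
    }

(System accesses, IEM_ACCESS_WHAT_SYS, are exempt from the check altogether, as the hunks below show.)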

Location:
trunk/src/VBox/VMM
Files:
6 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r95307 → r95410
     uint16_t *pu16Frame;
     uint64_t  uNewRsp;
-    rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
+    rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

      *        not perform correct translation if this happens. See Intel spec. 7.2.1
      *        "Task-State Segment". */
-    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
+    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
     if (rcStrict != VINF_SUCCESS)
     {

         PX86DESC pDescCurTSS;
         rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
-                             pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
+                             pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
         {

         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
         AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
-        rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
+        rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
         {

         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
         AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
-        rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
+        rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
         {

     {
         rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
-                             pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
+                             pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
         {

         RTPTRUNION uStackFrame;
         rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
+                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
+                             IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;

         RTPTRUNION      uStackFrame;
         uint8_t const   cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
-        rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
+        rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;

     RTPTRUNION uStackFrame;
     rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                         uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
+                         uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;

 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
 {
-    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
+    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
 }
 
     
  * @returns VBox strict status code.
  *
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   ppvMem              Where to return the pointer to the mapped
- *                              memory.
- * @param   cbMem               The number of bytes to map.  This is usually 1,
- *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
- *                              string operations it can be up to a page.
- * @param   iSegReg             The index of the segment register to use for
- *                              this access.  The base and limits are checked.
- *                              Use UINT8_MAX to indicate that no segmentation
- *                              is required (for IDT, GDT and LDT accesses).
- * @param   GCPtrMem            The address of the guest memory.
- * @param   fAccess             How the memory is being accessed.  The
- *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
- *                              how to map the memory, while the
- *                              IEM_ACCESS_WHAT_XXX bit is used when raising
- *                              exceptions.
- */
-VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   ppvMem      Where to return the pointer to the mapped memory.
+ * @param   cbMem       The number of bytes to map.  This is usually 1, 2, 4, 6,
+ *                      8, 12, 16, 32 or 512.  When used by string operations
+ *                      it can be up to a page.
+ * @param   iSegReg     The index of the segment register to use for this
+ *                      access.  The base and limits are checked. Use UINT8_MAX
+ *                      to indicate that no segmentation is required (for IDT,
+ *                      GDT and LDT accesses).
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   fAccess     How the memory is being accessed.  The
+ *                      IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
+ *                      the memory, while the IEM_ACCESS_WHAT_XXX bit is used
+ *                      when raising exceptions.
+ * @param   uAlignCtl   Alignment control:
+ *                          - Bits 15:0 is the alignment mask.
+ *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
+ *                            IEM_MEMMAP_F_ALIGN_SSE, and
+ *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
+ *                      Pass zero to skip alignment.
+ */
+VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
+                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
 {
     /*
     
     else
         return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
+
+    /*
+     * Alignment check.
+     */
+    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
+    { /* likelyish */ }
+    else
+    {
+        /* Misaligned access. */
+        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
+        {
+            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
+                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
+                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
+            {
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+
+                if (iemMemAreAlignmentChecksEnabled(pVCpu))
+                    return iemRaiseAlignmentCheckException(pVCpu);
+            }
+            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
+                     && iemMemAreAlignmentChecksEnabled(pVCpu)
+/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
+ *        implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
+                    )
+                return iemRaiseAlignmentCheckException(pVCpu);
+            else
+                return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+    }
 
 #ifdef IEM_WITH_DATA_TLB
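
The call sites updated below follow a few fixed patterns for uAlignCtl: 0 to skip the check (byte accesses and system/descriptor-table accesses), sizeof(*pXxx) - 1 for natural alignment (#AC only, when enabled), 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE for 16-byte SSE accesses, and 31 | IEM_MEMMAP_F_ALIGN_GP for the 32-byte AVX-aligned stores. Fed through the illustrative sketch near the top of the page, a misaligned SSE access comes out as expected:

    /* Misaligned 16-byte SSE access, MXCSR.MM clear: #GP(0), matching the
       old inline checks this changeset removes.  Illustrative fragment only. */
    ALIGNVERDICT v = alignVerdict(0x1008, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE,
                                  /*fAcEnabled=*/false, /*fMxcsrMm=*/false);
    /* v == RAISE_GP */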
     
  * @returns Pointer to the mapped memory.
  *
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbMem               The number of bytes to map.  This is usually 1,
- *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
- *                              string operations it can be up to a page.
- * @param   iSegReg             The index of the segment register to use for
- *                              this access.  The base and limits are checked.
- *                              Use UINT8_MAX to indicate that no segmentation
- *                              is required (for IDT, GDT and LDT accesses).
- * @param   GCPtrMem            The address of the guest memory.
- * @param   fAccess             How the memory is being accessed.  The
- *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
- *                              how to map the memory, while the
- *                              IEM_ACCESS_WHAT_XXX bit is used when raising
- *                              exceptions.
- */
-void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
+ * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
+ * @param   cbMem       The number of bytes to map.  This is usually 1,
+ *                      2, 4, 6, 8, 12, 16, 32 or 512.  When used by
+ *                      string operations it can be up to a page.
+ * @param   iSegReg     The index of the segment register to use for
+ *                      this access.  The base and limits are checked.
+ *                      Use UINT8_MAX to indicate that no segmentation
+ *                      is required (for IDT, GDT and LDT accesses).
+ * @param   GCPtrMem    The address of the guest memory.
+ * @param   fAccess     How the memory is being accessed.  The
+ *                      IEM_ACCESS_TYPE_XXX bit is used to figure out
+ *                      how to map the memory, while the
+ *                      IEM_ACCESS_WHAT_XXX bit is used when raising
+ *                      exceptions.
+ * @param   uAlignCtl   Alignment control:
+ *                          - Bits 15:0 is the alignment mask.
+ *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
+ *                            IEM_MEMMAP_F_ALIGN_SSE, and
+ *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
+ *                      Pass zero to skip alignment.
+ */
+void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
+                   uint32_t uAlignCtl) RT_NOEXCEPT
 {
     /*
     
     if (rcStrict == VINF_SUCCESS) { /*likely*/ }
     else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+
+    /*
+     * Alignment check.
+     */
+    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
+    { /* likelyish */ }
+    else
+    {
+        /* Misaligned access. */
+        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
+        {
+            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
+                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
+                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
+            {
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+
+                if (iemMemAreAlignmentChecksEnabled(pVCpu))
+                    iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+            }
+            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
+                     && iemMemAreAlignmentChecksEnabled(pVCpu)
+/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
+ *        implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
+                    )
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+            else
+                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+        }
+    }
 
     /*
     
     /* The lazy approach for now... */
     uint8_t const *pu8Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
     uint8_t const  bRet   = *pu8Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);

     /* The lazy approach for now... */
     uint16_t const *pu16Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+                                                             sizeof(*pu16Src) - 1);
     uint16_t const u16Ret = *pu16Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);

     /* The lazy approach for now... */
     uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* The lazy approach for now... */
     uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
     if (rc == VINF_SUCCESS)
     {

 uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
 {
-    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+                                                             sizeof(*pu32Src) - 1);
     uint32_t const  u32Ret  = *pu32Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);

 
 # else
-    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
+                                                             IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
     uint32_t const  u32Ret  = *pu32Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);

     /* The lazy approach for now... */
     int32_t const *pi32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* The lazy approach for now... */
     uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
+                                                             IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
     uint64_t const u64Ret = *pu64Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
-    if (RT_UNLIKELY(GCPtrMem & 15))
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
     uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
-    if (RT_LIKELY(!(GCPtrMem & 15)))
-    {
-        uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
-        uint64_t const u64Ret = *pu64Src;
-        iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
-        return u64Ret;
-    }
-
-    VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
-    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
+    uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+                                                             15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
+    uint64_t const u64Ret = *pu64Src;
+    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
+    return u64Ret;
 }
 #endif

     /* The lazy approach for now... */
     PCRTFLOAT80U pr80Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */ );
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem,
+                                                      IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */);
     *pr80Dst = *pr80Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);

     /* The lazy approach for now... */
     PCRTPBCD80U pd80Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
+                                                    IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
     *pd80Dst = *pd80Src;
     iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);

     /* The lazy approach for now... */
     PCRTUINT128U pu128Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
+                                                       IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
     pu128Dst->au64[0] = pu128Src->au64[0];
     pu128Dst->au64[1] = pu128Src->au64[1];

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
-    if (   (GCPtrMem & 15)
-        && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
     PCRTUINT128U pu128Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
-    if (   (GCPtrMem & 15) == 0
-        || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
-    {
-        PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
-        pu128Dst->au64[0] = pu128Src->au64[0];
-        pu128Dst->au64[1] = pu128Src->au64[1];
-        iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
-        return;
-    }
-
-    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
-    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+                                                       (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
+    pu128Dst->au64[0] = pu128Src->au64[0];
+    pu128Dst->au64[1] = pu128Src->au64[1];
+    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
 }
 #endif
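
Note the behavioral refinement in the SSE-aligned fetch variants above: the old code raised #GP(0) up front whenever the address was misaligned and MXCSR.MM was clear, and skipped any check when MXCSR.MM was set. With the centralized check, an MXCSR.MM access can additionally take #AC(0) when alignment checking is enabled; continuing the illustrative sketch from the top of the page:

    /* Same misaligned access, but with MXCSR.MM set and #AC enabled
       (CR0.AM, EFLAGS.AC, CPL 3): now #AC(0) instead of no fault at all.
       Illustrative fragment only. */
    ALIGNVERDICT v2 = alignVerdict(0x1008, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE,
                                   /*fAcEnabled=*/true, /*fMxcsrMm=*/true);
    /* v2 == RAISE_AC */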
     
     /* The lazy approach for now... */
     PCRTUINT256U pu256Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
+                                                       IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
     pu256Dst->au64[0] = pu256Src->au64[0];
     pu256Dst->au64[1] = pu256Src->au64[1];

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
-    if (GCPtrMem & 31)
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
     PCRTUINT256U pu256Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
-    if ((GCPtrMem & 31) == 0)
-    {
-        PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
-        pu256Dst->au64[0] = pu256Src->au64[0];
-        pu256Dst->au64[1] = pu256Src->au64[1];
-        pu256Dst->au64[2] = pu256Src->au64[2];
-        pu256Dst->au64[3] = pu256Src->au64[3];
-        iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
-        return;
-    }
-
-    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
-    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
+                                                       (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
+    pu256Dst->au64[0] = pu256Src->au64[0];
+    pu256Dst->au64[1] = pu256Src->au64[1];
+    pu256Dst->au64[2] = pu256Src->au64[2];
+    pu256Dst->au64[3] = pu256Src->au64[3];
+    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
 }
 #endif
     
     /* The lazy approach for now... */
     uint8_t *pu8Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
     *pu8Dst = u8Value;
     iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);

     /* The lazy approach for now... */
     uint16_t *pu16Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
+                                                 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
     *pu16Dst = u16Value;
     iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);

     /* The lazy approach for now... */
     uint32_t *pu32Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
+                                                 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
     *pu32Dst = u32Value;
     iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);

     /* The lazy approach for now... */
     uint64_t *pu64Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
+                                                 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
     *pu64Dst = u64Value;
     iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);

     /* The lazy approach for now... */
     PRTUINT128U pu128Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
+                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
     pu128Dst->au64[0] = u128Value.au64[0];
     pu128Dst->au64[1] = u128Value.au64[1];

 {
     /* The lazy approach for now... */
-    if (   (GCPtrMem & 15)
-        && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
     PRTUINT128U pu128Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
+                                (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    if (   (GCPtrMem & 15) == 0
-        || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
-    {
-        PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
-        pu128Dst->au64[0] = u128Value.au64[0];
-        pu128Dst->au64[1] = u128Value.au64[1];
-        iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
-        return;
-    }
-
-    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
-    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
+                                                     (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
+    pu128Dst->au64[0] = u128Value.au64[0];
+    pu128Dst->au64[1] = u128Value.au64[1];
+    iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
 }
 #endif
     
     /* The lazy approach for now... */
     PRTUINT256U pu256Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
+                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
     pu256Dst->au64[0] = pu256Value->au64[0];
     pu256Dst->au64[1] = pu256Value->au64[1];

 
 /**
- * Stores a data dqword, AVX aligned.
+ * Stores a data dqword, AVX \#GP(0) aligned.
  *
  * @returns Strict VBox status code.

 {
     /* The lazy approach for now... */
-    if (GCPtrMem & 31)
-        return iemRaiseGeneralProtectionFault0(pVCpu);
-
     PRTUINT256U pu256Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
+                                IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
     if (rc == VINF_SUCCESS)
     {

 {
     /* The lazy approach for now... */
-    if ((GCPtrMem & 31) == 0)
-    {
-        PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
-        pu256Dst->au64[0] = pu256Value->au64[0];
-        pu256Dst->au64[1] = pu256Value->au64[1];
-        pu256Dst->au64[2] = pu256Value->au64[2];
-        pu256Dst->au64[3] = pu256Value->au64[3];
-        iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
-        return;
-    }
-
-    VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
-    longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
+    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
+                                                     IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
+    pu256Dst->au64[0] = pu256Value->au64[0];
+    pu256Dst->au64[1] = pu256Value->au64[1];
+    pu256Dst->au64[2] = pu256Value->au64[2];
+    pu256Dst->au64[3] = pu256Value->au64[3];
+    iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
 }
 #endif

     /*
      * The SIDT and SGDT instructions actually stores the data using two
-     * independent writes.  The instructions does not respond to opsize prefixes.
+     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
+     * does not respond to opsize prefixes.
      */
     VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
     
     /* Write the word the lazy way. */
     uint16_t *pu16Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the dword the lazy way. */
     uint32_t *pu32Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

      * ancient hardware when it actually did change. */
     uint16_t *pu16Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint64_t *pu64Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint16_t const *pu16Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint16_t *pu16Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint32_t *pu32Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint64_t *pu64Dst;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint16_t const *pu16Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
+                                IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
     if (rc == VINF_SUCCESS)
     {

     /* Write the word the lazy way. */
     uint64_t const *pu64Src;
-    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
+                                      IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
     if (rcStrict == VINF_SUCCESS)
     {

  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
  * @param   cbMem               The number of bytes to push onto the stack.
+ * @param   cbAlign             The alignment mask (7, 3, 1).
  * @param   ppvMem              Where to return the pointer to the stack memory.
  *                              As with the other memory functions this could be

  *                              iemMemStackPushCommitSpecial().
  */
-VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
+VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
+                                         void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
 {
     Assert(cbMem < UINT8_MAX);
     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
-    return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
+    return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
+                     IEM_ACCESS_STACK_W, cbAlign);
 }
 
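
The special stack push/pop helpers take the same kind of mask, named cbAlign here: 1, 3 or 7 for word, dword or qword alignment. The call sites in this changeset pick it from the frame width or gate type; a minimal sketch of that selection, illustrative only but using values taken from the hunks above and below:

    /* Illustrative only: the stack alignment mask as the exception and
       call-gate paths in this changeset choose it. */
    static uint32_t stackPushAlignMask(bool fLongMode, bool f32BitGate)
    {
        return fLongMode  ? 7   /* qword-align the 64-bit frame */
             : f32BitGate ? 3   /* dword-align the 32-bit frame */
                          : 1;  /* word-align the 16-bit frame */
    }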
     
 /**
  * Begin a special stack pop (used by iret, retf and such).
+ *
+ * This will raise \#SS or \#PF if appropriate.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbMem               The number of bytes to pop from the stack.
+ * @param   cbAlign             The alignment mask (7, 3, 1).
+ * @param   ppvMem              Where to return the pointer to the stack memory.
+ * @param   puNewRsp            Where to return the new RSP value.  This must be
+ *                              assigned to CPUMCTX::rsp manually some time
+ *                              after iemMemStackPopDoneSpecial() has been
+ *                              called.
+ */
+VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
+                                        void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
+{
+    Assert(cbMem < UINT8_MAX);
+    RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
+    return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
+}
+
+
+/**
+ * Continue a special stack pop (used by iret and retf).
  *
  * This will raise \#SS or \#PF if appropriate.
     
  *                              called.
  */
-VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
-{
-    Assert(cbMem < UINT8_MAX);
-    RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
-    return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
-}
-
-
-/**
- * Continue a special stack pop (used by iret and retf).
- *
- * This will raise \#SS or \#PF if appropriate.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
- * @param   cbMem               The number of bytes to pop from the stack.
- * @param   ppvMem              Where to return the pointer to the stack memory.
- * @param   puNewRsp            Where to return the new RSP value.  This must be
- *                              assigned to CPUMCTX::rsp manually some time
- *                              after iemMemStackPopDoneSpecial() has been
- *                              called.
- */
 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
 {
     
     RTGCPTR     GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
     *puNewRsp = NewRsp.u;
-    return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
+    return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R,
+                     0 /* checked in iemMemStackPopBeginSpecial */);
 }
 
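
iemMemStackPopContinueSpecial deliberately passes 0: the initial iemMemStackPopBeginSpecial call already applied the alignment check for the frame, and the continuation address advances at a fixed qword granularity via iemRegGetRspForPopEx. A hedged sketch of how an iret/retf-style caller would pair the two (illustrative fragment; sizes and error handling invented for the example):

    uint64_t     uNewRsp;
    void const  *pvFrame;
    /* Begin: alignment is checked here, against cbAlign = 3 (dword). */
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &pvFrame, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
        /* Continue: no alignment re-check, Begin already did it. */
        rcStrict = iemMemStackPopContinueSpecial(pVCpu, 8, &pvFrame, &uNewRsp);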
     
     /* The lazy approach for now... */
     uint8_t const *pbSrc;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     if (rc == VINF_SUCCESS)
     {

     /* The lazy approach for now... */
     uint16_t const *pu16Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     if (rc == VINF_SUCCESS)
     {

     /* The lazy approach for now... */
     uint32_t const *pu32Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     if (rc == VINF_SUCCESS)
     {

     /* The lazy approach for now... */
     uint64_t const *pu64Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
+    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     if (rc == VINF_SUCCESS)
     {

         /* The normal case, map the 32-bit bits around the accessed bit (40). */
         GCPtr += 2 + 2;
-        rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
+        rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;

     {
         /* The misaligned GDT/LDT case, map the whole thing. */
-        rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
+        rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
         if (rcStrict != VINF_SUCCESS)
             return rcStrict;
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r95403 → r95410
    325325    {
    326326        uint16_t const *pa16Mem = NULL;
    327         rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
     327        rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
    328328        if (rcStrict == VINF_SUCCESS)
    329329        {
     
    405405    {
    406406        uint32_t const *pa32Mem;
    407         rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
     407        rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
    408408        if (rcStrict == VINF_SUCCESS)
    409409        {
     
    476476        GCPtrBottom--;
    477477        uint16_t *pa16Mem = NULL;
    478         rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
     478        rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
    479479        if (rcStrict == VINF_SUCCESS)
    480480        {
     
    547547        GCPtrBottom--;
    548548        uint32_t *pa32Mem;
    549         rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
     549        rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
    550550        if (rcStrict == VINF_SUCCESS)
    551551        {
     
    13271327
    13281328            GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
    1329             rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R);
     1329            rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
    13301330            if (rcStrict != VINF_SUCCESS)
    13311331            {
     
    14561456            void    *pvNewFrame;
    14571457            RTGCPTR  GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
    1458             rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW);
     1458            rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
    14591459            if (rcStrict != VINF_SUCCESS)
    14601460            {
     
    14771477            pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    14781478            pVCpu->cpum.GstCtx.rsp         = uNewRsp;
    1479             pVCpu->iem.s.uCpl = uNewCSDpl;
     1479            pVCpu->iem.s.uCpl = uNewCSDpl; /** @todo is the parameter words accessed using the new CPL or the old CPL? */
    14801480            Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    14811481            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
     
    14841484            /** @todo this can still fail due to SS.LIMIT not being checked.   */
    14851485            rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
     1486                                                   IEM_IS_LONG_MODE(pVCpu) ? 7
     1487                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
    14861488                                                   &uPtrRet.pv, &uNewRsp);
    14871489            AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
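iemMemStackPushBeginSpecial now takes the alignment mask as its third argument, chosen from the stack slot width of the branch being emulated: 7 for the 8-byte slots of long mode, 3 for the 4-byte slots of a 386 call gate, 1 for the 2-byte slots of a 286 gate. A hedged sketch of that selection (stand-in names, not VBox code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Map the call-gate flavour to the stack alignment mask used above. */
    static uint32_t myGateStackAlignMask(bool fLongMode, bool f386Gate)
    {
        if (fLongMode)
            return 7;       /* 8-byte stack slots */
        return f386Gate ? 3 /* 4-byte stack slots */
                        : 1 /* 2-byte stack slots */;
    }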
     
    14921494                if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
    14931495                {
    1494                     /* Push the old CS:rIP. */
    1495                     uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
    1496                     uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
    1497 
    14981496                    if (cbWords)
    14991497                    {
    15001498                        /* Map the relevant chunk of the old stack. */
    1501                         rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
     1499                        rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
     1500                                             IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
    15021501                        if (rcStrict != VINF_SUCCESS)
    15031502                        {
     
    15191518                    }
    15201519
     1520                    /* Push the old CS:rIP. */
     1521                    uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
     1522                    uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
     1523
    15211524                    /* Push the old SS:rSP. */
    15221525                    uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
     
    15271530                    Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
    15281531
    1529                     /* Push the old CS:rIP. */
    1530                     uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
    1531                     uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
    1532 
    15331532                    if (cbWords)
    15341533                    {
    15351534                        /* Map the relevant chunk of the old stack. */
    1536                         rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
     1535                        rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
     1536                                             IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
    15371537                        if (rcStrict != VINF_SUCCESS)
    15381538                        {
     
    15531553                        }
    15541554                    }
     1555
     1556                    /* Push the old CS:rIP. */
     1557                    uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
     1558                    uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
    15551559
    15561560                    /* Push the old SS:rSP. */
     
    16401644                                                   IEM_IS_LONG_MODE(pVCpu) ? 8+8
    16411645                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
     1646                                                   IEM_IS_LONG_MODE(pVCpu) ? 7
     1647                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
    16421648                                                   &uPtrRet.pv, &uNewRsp);
    16431649            if (rcStrict != VINF_SUCCESS)
     
    19982004        /* Check stack first - may #SS(0). */
    19992005        rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
     2006                                               enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
    20002007                                               &uPtrRet.pv, &uNewRsp);
    20012008        if (rcStrict != VINF_SUCCESS)
     
    21042111     *        16-bit code cause a two or four byte CS to be pushed? */
    21052112    rcStrict = iemMemStackPushBeginSpecial(pVCpu,
    2106                                            enmEffOpSize == IEMMODE_64BIT   ? 8+8
    2107                                            : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
     2113                                           enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
     2114                                           enmEffOpSize == IEMMODE_64BIT ? 7   : enmEffOpSize == IEMMODE_32BIT ? 3   : 1,
    21082115                                           &uPtrRet.pv, &uNewRsp);
    21092116    if (rcStrict != VINF_SUCCESS)
     
    22172224    uint32_t        cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
    22182225                             : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
    2219     rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
     2226    rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
     2227                                          enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
     2228                                          &uPtrFrame.pv, &uNewRsp);
    22202229    if (rcStrict != VINF_SUCCESS)
    22212230        return rcStrict;
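The far-return path mirrors this on the pop side: iemMemStackPopBeginSpecial gets the same mask, paired with the frame size that the effective operand size dictates. A sketch of the pairing (the enum values are stand-ins for IEMMODE_*):

    #include <stdio.h>

    enum MYMODE { MY_16BIT, MY_32BIT, MY_64BIT };

    /* Frame size (offset + selector) and alignment mask per operand size. */
    static void myRetfPopParams(enum MYMODE enmOpSize, unsigned *pcbRetPtr, unsigned *pfAlignMask)
    {
        switch (enmOpSize)
        {
            case MY_16BIT: *pcbRetPtr = 2 + 2; *pfAlignMask = 1; break;
            case MY_32BIT: *pcbRetPtr = 4 + 4; *pfAlignMask = 3; break;
            default:       *pcbRetPtr = 8 + 8; *pfAlignMask = 7; break;
        }
    }

    int main(void)
    {
        unsigned cb, fMask;
        myRetfPopParams(MY_32BIT, &cb, &fMask);
        printf("cbRetPtr=%u fAlignMask=%u\n", cb, fMask);   /* cbRetPtr=8 fAlignMask=3 */
        return 0;
    }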
     
    29262935    if (enmEffOpSize == IEMMODE_32BIT)
    29272936    {
    2928         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
     2937        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
    29292938        if (rcStrict != VINF_SUCCESS)
    29302939            return rcStrict;
     
    29452954    else
    29462955    {
    2947         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
     2956        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
    29482957        if (rcStrict != VINF_SUCCESS)
    29492958            return rcStrict;
     
    32103219    if (enmEffOpSize == IEMMODE_32BIT)
    32113220    {
    3212         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, &uFrame.pv, &uNewRsp);
     3221        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
    32133222        if (rcStrict != VINF_SUCCESS)
    32143223            return rcStrict;
     
    32193228    else
    32203229    {
    3221         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &uFrame.pv, &uNewRsp);
     3230        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
    32223231        if (rcStrict != VINF_SUCCESS)
    32233232            return rcStrict;
     
    35663575    if (enmEffOpSize == IEMMODE_64BIT)
    35673576    {
    3568         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, &uFrame.pv, &uNewRsp);
     3577        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
    35693578        if (rcStrict != VINF_SUCCESS)
    35703579            return rcStrict;
     
    35773586    else if (enmEffOpSize == IEMMODE_32BIT)
    35783587    {
    3579         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, &uFrame.pv, &uNewRsp);
     3588        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
    35803589        if (rcStrict != VINF_SUCCESS)
    35813590            return rcStrict;
     
    35893598    {
    35903599        Assert(enmEffOpSize == IEMMODE_16BIT);
    3591         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, &uFrame.pv, &uNewRsp);
     3600        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
    35923601        if (rcStrict != VINF_SUCCESS)
    35933602            return rcStrict;
     
    39553964    uint8_t const *pa8Mem;
    39563965    RTGCPHYS GCPtrStart = 0x800;    /* Fixed table location. */
    3957     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R);
     3966    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
    39583967    if (rcStrict != VINF_SUCCESS)
    39593968        return rcStrict;
     
    54535462     */
    54545463    void *pvDesc;
    5455     rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
     5464    rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
     5465                         IEM_ACCESS_DATA_RW, 0);
    54565466    if (rcStrict != VINF_SUCCESS)
    54575467        return rcStrict;
     
    83168326    if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
    83178327        return iemRaiseDeviceNotAvailable(pVCpu);
    8318     if (GCPtrEff & 15)
    8319     {
    8320         /** @todo CPU/VM detection possible! \#AC might not be signaled for
    8321          * all/any misalignment sizes; Intel says it's an implementation detail. */
    8322         if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
    8323             && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
    8324             && pVCpu->iem.s.uCpl == 3)
    8325             return iemRaiseAlignmentCheckException(pVCpu);
    8326         return iemRaiseGeneralProtectionFault0(pVCpu);
    8327     }
    83288328
    83298329    /*
     
    83318331     */
    83328332    void *pvMem512;
    8333     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     8333    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
     8334                                      15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    83348335    if (rcStrict != VINF_SUCCESS)
    83358336        return rcStrict;
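Here the open-coded (GCPtrEff & 15) check deleted above is folded into the mapping call: mask 15 enforces the 16-byte alignment FXSAVE requires, IEM_MEMMAP_F_ALIGN_GP selects \#GP(0) as the default fault, and IEM_MEMMAP_F_ALIGN_GP_OR_AC upgrades it to \#AC(0) when alignment checking is armed (CPL 3, CR0.AM and EFLAGS.AC set), matching the removed code. A simplified decision sketch under those assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { MY_OK, MY_RAISE_AC, MY_RAISE_GP } MYVERDICT;

    /* What the FXSAVE/FXRSTOR flag combination asks for on misalignment. */
    static MYVERDICT myFxVerdict(uint64_t GCPtrEff, bool fAcArmed)
    {
        if (!(GCPtrEff & 15))
            return MY_OK;
        return fAcArmed ? MY_RAISE_AC : MY_RAISE_GP;
    }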
     
    84198420    if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
    84208421        return iemRaiseDeviceNotAvailable(pVCpu);
    8421     if (GCPtrEff & 15)
    8422     {
    8423         /** @todo CPU/VM detection possible! \#AC might not be signaled for
    8424          * all/any misalignment sizes; Intel says it's an implementation detail. */
    8425         if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
    8426             && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
    8427             && pVCpu->iem.s.uCpl == 3)
    8428             return iemRaiseAlignmentCheckException(pVCpu);
    8429         return iemRaiseGeneralProtectionFault0(pVCpu);
    8430     }
    84318422
    84328423    /*
     
    84348425     */
    84358426    void *pvMem512;
    8436     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
     8427    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
     8428                                      15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    84378429    if (rcStrict != VINF_SUCCESS)
    84388430        return rcStrict;
     
    85408532    if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
    85418533        return iemRaiseDeviceNotAvailable(pVCpu);
    8542     if (GCPtrEff & 63)
    8543     {
    8544         /** @todo CPU/VM detection possible! \#AC might not be signaled for
    8545          * all/any misalignment sizes; Intel says it's an implementation detail. */
    8546         if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
    8547             && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
    8548             && pVCpu->iem.s.uCpl == 3)
    8549             return iemRaiseAlignmentCheckException(pVCpu);
    8550         return iemRaiseGeneralProtectionFault0(pVCpu);
    8551     }
    85528534
    85538535    /*
     
    85688550    /* The x87+SSE state.  */
    85698551    void *pvMem512;
    8570     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     8552    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
     8553                                      63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    85718554    if (rcStrict != VINF_SUCCESS)
    85728555        return rcStrict;
     
    85768559    /* The header.  */
    85778560    PX86XSAVEHDR pHdr;
    8578     rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
     8561    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(*pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
    85798562    if (rcStrict != VINF_SUCCESS)
    85808563        return rcStrict;
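For XSAVE the mask is 63, the architectural 64-byte alignment of the XSAVE area. Since 512 is a multiple of 64, the header at GCPtrEff + 512 is then aligned by construction, which is why the follow-up mappings pass 0 with the "checked above" remark. Illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t const uBase = UINT64_C(0x7fff0000);    /* assume it passed the 63 mask */
        assert((uBase & 63) == 0);
        assert(((uBase + 512) & 63) == 0);              /* header stays 64-byte aligned */
        return 0;
    }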
     
    86488631        PX86XSAVEYMMHI  pCompDst;
    86498632        rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
    8650                              IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     8633                             IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
    86518634        if (rcStrict != VINF_SUCCESS)
    86528635            return rcStrict;
     
    87228705    /* The x87+SSE state.  */
    87238706    void *pvMem512;
    8724     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
     8707    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
     8708                                      63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    87258709    if (rcStrict != VINF_SUCCESS)
    87268710        return rcStrict;
     
    87338717    PX86XSAVEHDR  pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
    87348718    PCX86XSAVEHDR pHdrSrc;
    8735     rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
     8719    rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
     8720                         IEM_ACCESS_DATA_R, 0 /* checked above */);
    87368721    if (rcStrict != VINF_SUCCESS)
    87378722        return rcStrict;
     
    88578842            PCX86XSAVEYMMHI pCompSrc;
    88588843            rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
    8859                                  iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
     8844                                 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
     8845                                 IEM_ACCESS_DATA_R, 0 /* checked above */);
    88608846            if (rcStrict != VINF_SUCCESS)
    88618847                return rcStrict;
     
    91569142    RTPTRUNION   uPtr;
    91579143    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
    9158                                       iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9144                                      iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
     9145                                      enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
    91599146    if (rcStrict != VINF_SUCCESS)
    91609147        return rcStrict;
     
    91859172    RTPTRUNION   uPtr;
    91869173    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
    9187                                       iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9174                                      iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
    91889175    if (rcStrict != VINF_SUCCESS)
    91899176        return rcStrict;
     
    92359222    RTCPTRUNION  uPtr;
    92369223    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
    9237                                       iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
     9224                                      iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
     9225                                      enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
    92389226    if (rcStrict != VINF_SUCCESS)
    92399227        return rcStrict;
     
    92629250    RTCPTRUNION  uPtr;
    92639251    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
    9264                                       iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
     9252                                      iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
    92659253    if (rcStrict != VINF_SUCCESS)
    92669254        return rcStrict;
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h

    r94768 r95410  
    12311231
    12321232    OP_TYPE        *puMem;
    1233     rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W);
     1233    rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
     1234                         IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
    12341235    if (rcStrict != VINF_SUCCESS)
    12351236        return rcStrict;
     
    14211422        {
    14221423            OP_TYPE *puMem;
    1423             rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
     1424            rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
     1425                                 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
    14241426            if (rcStrict != VINF_SUCCESS)
    14251427                return rcStrict;
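In the string-instruction templates the mask comes from the template parameter OP_SIZE (operand size in bits), so OP_SIZE / 8 - 1 again yields the natural-alignment mask. Standalone illustration of the arithmetic:

    #include <stdio.h>

    #define MY_MASK(OP_SIZE) ((OP_SIZE) / 8 - 1)

    int main(void)
    {
        printf("%d %d %d %d\n", MY_MASK(8), MY_MASK(16), MY_MASK(32), MY_MASK(64));
        /* prints: 0 1 3 7 */
        return 0;
    }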
  • trunk/src/VBox/VMM/include/IEMInline.h

    r94838 r95410  
    21982198 */
    21992199
     2200
     2201/**
     2202 * Checks whether alignment checks are enabled or not.
     2203 *
     2204 * @returns true if enabled, false if not.
     2205 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2206 */
     2207DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu)
     2208{
     2209    AssertCompile(X86_CR0_AM == X86_EFL_AC);
     2210    return pVCpu->iem.s.uCpl == 3
     2211        && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
     2212}
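The AssertCompile is what makes the single AND work: CR0.AM and EFLAGS.AC occupy the same bit position (bit 18), so cr0 & eflags & X86_CR0_AM tests both in one operation. A standalone demonstration of the coincidence (bit values per the Intel SDM):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MY_CR0_AM  UINT32_C(0x00040000)     /* CR0 bit 18: Alignment Mask     */
    #define MY_EFL_AC  UINT32_C(0x00040000)     /* EFLAGS bit 18: Alignment Check */

    static bool myAlignmentChecksEnabled(uint32_t cr0, uint32_t fEfl, uint8_t uCpl)
    {
        static_assert(MY_CR0_AM == MY_EFL_AC, "the trick needs identical bit positions");
        return uCpl == 3 && ((cr0 & fEfl) & MY_CR0_AM);
    }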
     2213
    22002214/**
    22012215 * Checks if the given segment can be written to, raise the appropriate
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r95403 r95410  
    28112811/** @name   Memory access.
    28122812 * @{ */
    2813 VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT;
     2813
     2814/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
     2815#define IEM_MEMMAP_F_ALIGN_GP       RT_BIT_32(16)
     2816/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1,
     2817 *  in which case it works like a normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
     2818#define IEM_MEMMAP_F_ALIGN_SSE      RT_BIT_32(17)
     2819/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
     2820 * Users include FXSAVE & FXRSTOR. */
     2821#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
     2822
     2823VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
     2824                          uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
    28142825VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
    28152826#ifndef IN_RING3
     
    28992910#endif
    29002911
    2901 VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
     2912VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
     2913                                            void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    29022914VBOXSTRICTRC    iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
    29032915VBOXSTRICTRC    iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
     
    29082920VBOXSTRICTRC    iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    29092921VBOXSTRICTRC    iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
    2910 VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
     2922VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
     2923                                           void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    29112924VBOXSTRICTRC    iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    29122925VBOXSTRICTRC    iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
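Taken together, the new iemMemMap contract is: the low bits of uAlignCtl hold the alignment mask and the three flags above adjust which exception a violation raises, while the stack push/pop helpers take the same mask as their new cbAlign argument. A simplified decision table covering all three flags (a sketch under the documented semantics, not the IEM implementation; the MXCSR.MM case follows the comment on IEM_MEMMAP_F_ALIGN_SSE):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_F_ALIGN_GP        (UINT32_C(1) << 16)
    #define MY_F_ALIGN_SSE       (UINT32_C(1) << 17)
    #define MY_F_ALIGN_GP_OR_AC  (UINT32_C(1) << 18)

    typedef enum { MY_NONE, MY_AC, MY_GP } MYXCPT;

    /* Which exception a misaligned access should raise, given the flags. */
    static MYXCPT myMisalignXcpt(uint32_t fAlignCtlFlags, bool fAcArmed, bool fMxcsrMM)
    {
        if (fAlignCtlFlags & MY_F_ALIGN_GP)
        {
            if ((fAlignCtlFlags & MY_F_ALIGN_SSE) && fMxcsrMM)
                return fAcArmed ? MY_AC : MY_NONE;  /* MM=1: behaves like a plain #AC check */
            if ((fAlignCtlFlags & MY_F_ALIGN_GP_OR_AC) && fAcArmed)
                return MY_AC;
            return MY_GP;
        }
        return fAcArmed ? MY_AC : MY_NONE;          /* default: #AC when armed, else nothing */
    }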
  • trunk/src/VBox/VMM/include/IEMMc.h

    r95403 r95410  
    883883 */
    884884#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
    885     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
     885    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), \
     886                                       (a_GCPtrMem), (a_fAccess), sizeof(*(a_pMem)) - 1))
    886887
    887888/** Maps guest memory for direct or bounce buffered access.
     
    890891 */
    891892#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
    892     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
     893    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), \
     894                                       (a_GCPtrMem), (a_fAccess), (a_cbMem) - 1))
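Both MC macros now derive the mask automatically: IEM_MC_MEM_MAP from the mapped pointer's target type, IEM_MC_MEM_MAP_EX from the explicit byte count (assuming power-of-two sizes), so the existing IEM_MC users pick up natural-alignment checking without per-site edits. A mock of the derivation (stand-in names):

    #include <stdint.h>
    #include <stdio.h>

    /* Same expression the macro uses: natural alignment mask of the type. */
    #define MY_MEM_MAP_MASK(a_pMem) (sizeof(*(a_pMem)) - 1)

    int main(void)
    {
        uint16_t *pu16 = 0; uint32_t *pu32 = 0; uint64_t *pu64 = 0;
        printf("%u %u %u\n", (unsigned)MY_MEM_MAP_MASK(pu16),
               (unsigned)MY_MEM_MAP_MASK(pu32), (unsigned)MY_MEM_MAP_MASK(pu64));
        /* prints: 1 3 7 */
        return 0;
    }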
    893895
    894896/** Commits the memory and unmaps the guest memory.