VirtualBox

Changeset 102430 in vbox


Ignore:
Timestamp:
Dec 2, 2023 2:39:20 AM (14 months ago)
Author:
vboxsync
Message:

VMM/IEM: Refactored iemMemMap and friends to work with bUnmapInfo / bMapInfo. bugref:10371

Location:
trunk/src/VBox/VMM
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r102428 r102430  
    21062106     * Push the stack frame.
    21072107     */
     2108    uint8_t   bUnmapInfo;
    21082109    uint16_t *pu16Frame;
    21092110    uint64_t  uNewRsp;
    2110     rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
     2111    rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
    21112112    if (rcStrict != VINF_SUCCESS)
    21122113        return rcStrict;
     
    21212122    pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
    21222123    pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
    2123     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
     2124    rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    21242125    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    21252126        return rcStrict;
     
    23112312 * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    23122313 * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    2313  * @param   SelTSS          The TSS selector of the new task.
    2314  * @param   pNewDescTSS     Pointer to the new TSS descriptor.
     2314 * @param   SelTss          The TSS selector of the new task.
     2315 * @param   pNewDescTss     Pointer to the new TSS descriptor.
    23152316 */
    23162317VBOXSTRICTRC
     
    23212322              uint16_t        uErr,
    23222323              uint64_t        uCr2,
    2323               RTSEL           SelTSS,
    2324               PIEMSELDESC     pNewDescTSS) RT_NOEXCEPT
     2324              RTSEL           SelTss,
     2325              PIEMSELDESC     pNewDescTss) RT_NOEXCEPT
    23252326{
    23262327    Assert(!IEM_IS_REAL_MODE(pVCpu));
     
    23282329    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    23292330
    2330     uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
    2331     Assert(   uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
    2332            || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
    2333            || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
    2334            || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    2335 
    2336     bool const fIsNewTSS386 = (   uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
    2337                                || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    2338 
    2339     Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
    2340          fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
     2331    uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
     2332    Assert(   uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
     2333           || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
     2334           || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
     2335           || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
     2336
     2337    bool const fIsNewTss386 = (   uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
     2338                               || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
     2339
     2340    Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
     2341         fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
    23412342
    23422343    /* Update CR2 in case it's a page-fault. */
     
    23502351     * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
    23512352     */
    2352     uint32_t const uNewTSSLimit    = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
    2353     uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
    2354     if (uNewTSSLimit < uNewTSSLimitMin)
    2355     {
    2356         Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
    2357              enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
    2358         return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
     2353    uint32_t const uNewTssLimit    = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
     2354    uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
     2355    if (uNewTssLimit < uNewTssLimitMin)
     2356    {
     2357        Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
     2358             enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
     2359        return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
    23592360    }
    23602361
     
    23682369    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    23692370    {
    2370         Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
    2371         IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
     2371        Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
     2372        IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
    23722373    }
    23732374
     
    23782379    if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
    23792380    {
    2380         uint32_t const uExitInfo1 = SelTSS;
     2381        uint32_t const uExitInfo1 = SelTss;
    23812382        uint32_t       uExitInfo2 = uErr;
    23822383        switch (enmTaskSwitch)
     
    24042405     * end up with smaller than "legal" TSS limits.
    24052406     */
    2406     uint32_t const uCurTSSLimit    = pVCpu->cpum.GstCtx.tr.u32Limit;
    2407     uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
    2408     if (uCurTSSLimit < uCurTSSLimitMin)
    2409     {
    2410         Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
    2411              enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
    2412         return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
     2407    uint32_t const uCurTssLimit    = pVCpu->cpum.GstCtx.tr.u32Limit;
     2408    uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
     2409    if (uCurTssLimit < uCurTssLimitMin)
     2410    {
     2411        Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
     2412             enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
     2413        return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
    24132414    }
    24142415
     
    24172418     * and not the entire TSS.
    24182419     */
    2419     void           *pvNewTSS;
    2420     uint32_t  const cbNewTSS    = uNewTSSLimitMin + 1;
    2421     RTGCPTR   const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
     2420    uint8_t         bUnmapInfoNewTss;
     2421    void           *pvNewTss;
     2422    uint32_t  const cbNewTss    = uNewTssLimitMin + 1;
     2423    RTGCPTR   const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
    24222424    AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
    24232425    /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
    24242426     *        not perform correct translation if this happens. See Intel spec. 7.2.1
    24252427     *        "Task-State Segment". */
    2426     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
     2428    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
     2429/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
     2430 * Consider wrapping the remainder into a function for simpler cleanup. */
    24272431    if (rcStrict != VINF_SUCCESS)
    24282432    {
    2429         Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
    2430              cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
     2433        Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
     2434             cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
    24312435        return rcStrict;
    24322436    }
     
    24392443        || enmTaskSwitch == IEMTASKSWITCH_IRET)
    24402444    {
    2441         PX86DESC pDescCurTSS;
    2442         rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
     2445        uint8_t  bUnmapInfoDescCurTss;
     2446        PX86DESC pDescCurTss;
     2447        rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
    24432448                             pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
    24442449        if (rcStrict != VINF_SUCCESS)
     
    24492454        }
    24502455
    2451         pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
    2452         rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
     2456        pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
     2457        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
    24532458        if (rcStrict != VINF_SUCCESS)
    24542459        {
     
    24612466        if (enmTaskSwitch == IEMTASKSWITCH_IRET)
    24622467        {
    2463             Assert(   uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
    2464                    || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
     2468            Assert(   uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
     2469                   || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    24652470            fEFlags &= ~X86_EFL_NT;
    24662471        }
     
    24702475     * Save the CPU state into the current TSS.
    24712476     */
    2472     RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
    2473     if (GCPtrNewTSS == GCPtrCurTSS)
    2474     {
    2475         Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
     2477    RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
     2478    if (GCPtrNewTss == GCPtrCurTss)
     2479    {
     2480        Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
    24762481        Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
    24772482             pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
     
    24792484             pVCpu->cpum.GstCtx.ldtr.Sel));
    24802485    }
    2481     if (fIsNewTSS386)
     2486    if (fIsNewTss386)
    24822487    {
    24832488        /*
     
    24852490         * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
    24862491         */
    2487         void          *pvCurTSS32;
    2488         uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
    2489         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
     2492        uint8_t        bUnmapInfoCurTss32;
     2493        void          *pvCurTss32;
     2494        uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
     2495        uint32_t const cbCurTss  = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
    24902496        AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
    2491         rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
     2497        rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
     2498                             GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
    24922499        if (rcStrict != VINF_SUCCESS)
    24932500        {
    2494             Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
    2495                  enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
     2501            Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
     2502                 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
    24962503            return rcStrict;
    24972504        }
    24982505
    2499         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
    2500         PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
    2501         pCurTSS32->eip    = uNextEip;
    2502         pCurTSS32->eflags = fEFlags;
    2503         pCurTSS32->eax    = pVCpu->cpum.GstCtx.eax;
    2504         pCurTSS32->ecx    = pVCpu->cpum.GstCtx.ecx;
    2505         pCurTSS32->edx    = pVCpu->cpum.GstCtx.edx;
    2506         pCurTSS32->ebx    = pVCpu->cpum.GstCtx.ebx;
    2507         pCurTSS32->esp    = pVCpu->cpum.GstCtx.esp;
    2508         pCurTSS32->ebp    = pVCpu->cpum.GstCtx.ebp;
    2509         pCurTSS32->esi    = pVCpu->cpum.GstCtx.esi;
    2510         pCurTSS32->edi    = pVCpu->cpum.GstCtx.edi;
    2511         pCurTSS32->es     = pVCpu->cpum.GstCtx.es.Sel;
    2512         pCurTSS32->cs     = pVCpu->cpum.GstCtx.cs.Sel;
    2513         pCurTSS32->ss     = pVCpu->cpum.GstCtx.ss.Sel;
    2514         pCurTSS32->ds     = pVCpu->cpum.GstCtx.ds.Sel;
    2515         pCurTSS32->fs     = pVCpu->cpum.GstCtx.fs.Sel;
    2516         pCurTSS32->gs     = pVCpu->cpum.GstCtx.gs.Sel;
    2517 
    2518         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
     2506        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
     2507        PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
     2508        pCurTss32->eip    = uNextEip;
     2509        pCurTss32->eflags = fEFlags;
     2510        pCurTss32->eax    = pVCpu->cpum.GstCtx.eax;
     2511        pCurTss32->ecx    = pVCpu->cpum.GstCtx.ecx;
     2512        pCurTss32->edx    = pVCpu->cpum.GstCtx.edx;
     2513        pCurTss32->ebx    = pVCpu->cpum.GstCtx.ebx;
     2514        pCurTss32->esp    = pVCpu->cpum.GstCtx.esp;
     2515        pCurTss32->ebp    = pVCpu->cpum.GstCtx.ebp;
     2516        pCurTss32->esi    = pVCpu->cpum.GstCtx.esi;
     2517        pCurTss32->edi    = pVCpu->cpum.GstCtx.edi;
     2518        pCurTss32->es     = pVCpu->cpum.GstCtx.es.Sel;
     2519        pCurTss32->cs     = pVCpu->cpum.GstCtx.cs.Sel;
     2520        pCurTss32->ss     = pVCpu->cpum.GstCtx.ss.Sel;
     2521        pCurTss32->ds     = pVCpu->cpum.GstCtx.ds.Sel;
     2522        pCurTss32->fs     = pVCpu->cpum.GstCtx.fs.Sel;
     2523        pCurTss32->gs     = pVCpu->cpum.GstCtx.gs.Sel;
     2524
     2525        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
    25192526        if (rcStrict != VINF_SUCCESS)
    25202527        {
     
    25292536         * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
    25302537         */
    2531         void          *pvCurTSS16;
    2532         uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
    2533         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
     2538        uint8_t        bUnmapInfoCurTss16;
     2539        void          *pvCurTss16;
     2540        uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
     2541        uint32_t const cbCurTss  = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
    25342542        AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
    2535         rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
     2543        rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
     2544                             GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
    25362545        if (rcStrict != VINF_SUCCESS)
    25372546        {
    2538             Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
    2539                  enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
     2547            Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
     2548                 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
    25402549            return rcStrict;
    25412550        }
    25422551
    2543         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
    2544         PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
    2545         pCurTSS16->ip    = uNextEip;
    2546         pCurTSS16->flags = (uint16_t)fEFlags;
    2547         pCurTSS16->ax    = pVCpu->cpum.GstCtx.ax;
    2548         pCurTSS16->cx    = pVCpu->cpum.GstCtx.cx;
    2549         pCurTSS16->dx    = pVCpu->cpum.GstCtx.dx;
    2550         pCurTSS16->bx    = pVCpu->cpum.GstCtx.bx;
    2551         pCurTSS16->sp    = pVCpu->cpum.GstCtx.sp;
    2552         pCurTSS16->bp    = pVCpu->cpum.GstCtx.bp;
    2553         pCurTSS16->si    = pVCpu->cpum.GstCtx.si;
    2554         pCurTSS16->di    = pVCpu->cpum.GstCtx.di;
    2555         pCurTSS16->es    = pVCpu->cpum.GstCtx.es.Sel;
    2556         pCurTSS16->cs    = pVCpu->cpum.GstCtx.cs.Sel;
    2557         pCurTSS16->ss    = pVCpu->cpum.GstCtx.ss.Sel;
    2558         pCurTSS16->ds    = pVCpu->cpum.GstCtx.ds.Sel;
    2559 
    2560         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
     2552        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
     2553        PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
     2554        pCurTss16->ip    = uNextEip;
     2555        pCurTss16->flags = (uint16_t)fEFlags;
     2556        pCurTss16->ax    = pVCpu->cpum.GstCtx.ax;
     2557        pCurTss16->cx    = pVCpu->cpum.GstCtx.cx;
     2558        pCurTss16->dx    = pVCpu->cpum.GstCtx.dx;
     2559        pCurTss16->bx    = pVCpu->cpum.GstCtx.bx;
     2560        pCurTss16->sp    = pVCpu->cpum.GstCtx.sp;
     2561        pCurTss16->bp    = pVCpu->cpum.GstCtx.bp;
     2562        pCurTss16->si    = pVCpu->cpum.GstCtx.si;
     2563        pCurTss16->di    = pVCpu->cpum.GstCtx.di;
     2564        pCurTss16->es    = pVCpu->cpum.GstCtx.es.Sel;
     2565        pCurTss16->cs    = pVCpu->cpum.GstCtx.cs.Sel;
     2566        pCurTss16->ss    = pVCpu->cpum.GstCtx.ss.Sel;
     2567        pCurTss16->ds    = pVCpu->cpum.GstCtx.ds.Sel;
     2568
     2569        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
    25612570        if (rcStrict != VINF_SUCCESS)
    25622571        {
     
    25742583    {
    25752584        /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
    2576         PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
     2585        PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
    25772586        pNewTSS->selPrev  = pVCpu->cpum.GstCtx.tr.Sel;
    25782587    }
     
    25852594    uint16_t uNewES,  uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
    25862595    bool     fNewDebugTrap;
    2587     if (fIsNewTSS386)
    2588     {
    2589         PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
    2590         uNewCr3       = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
    2591         uNewEip       = pNewTSS32->eip;
    2592         uNewEflags    = pNewTSS32->eflags;
    2593         uNewEax       = pNewTSS32->eax;
    2594         uNewEcx       = pNewTSS32->ecx;
    2595         uNewEdx       = pNewTSS32->edx;
    2596         uNewEbx       = pNewTSS32->ebx;
    2597         uNewEsp       = pNewTSS32->esp;
    2598         uNewEbp       = pNewTSS32->ebp;
    2599         uNewEsi       = pNewTSS32->esi;
    2600         uNewEdi       = pNewTSS32->edi;
    2601         uNewES        = pNewTSS32->es;
    2602         uNewCS        = pNewTSS32->cs;
    2603         uNewSS        = pNewTSS32->ss;
    2604         uNewDS        = pNewTSS32->ds;
    2605         uNewFS        = pNewTSS32->fs;
    2606         uNewGS        = pNewTSS32->gs;
    2607         uNewLdt       = pNewTSS32->selLdt;
    2608         fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
     2596    if (fIsNewTss386)
     2597    {
     2598        PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
     2599        uNewCr3       = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
     2600        uNewEip       = pNewTss32->eip;
     2601        uNewEflags    = pNewTss32->eflags;
     2602        uNewEax       = pNewTss32->eax;
     2603        uNewEcx       = pNewTss32->ecx;
     2604        uNewEdx       = pNewTss32->edx;
     2605        uNewEbx       = pNewTss32->ebx;
     2606        uNewEsp       = pNewTss32->esp;
     2607        uNewEbp       = pNewTss32->ebp;
     2608        uNewEsi       = pNewTss32->esi;
     2609        uNewEdi       = pNewTss32->edi;
     2610        uNewES        = pNewTss32->es;
     2611        uNewCS        = pNewTss32->cs;
     2612        uNewSS        = pNewTss32->ss;
     2613        uNewDS        = pNewTss32->ds;
     2614        uNewFS        = pNewTss32->fs;
     2615        uNewGS        = pNewTss32->gs;
     2616        uNewLdt       = pNewTss32->selLdt;
     2617        fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
    26092618    }
    26102619    else
    26112620    {
    2612         PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
     2621        PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
    26132622        uNewCr3       = 0;
    2614         uNewEip       = pNewTSS16->ip;
    2615         uNewEflags    = pNewTSS16->flags;
    2616         uNewEax       = UINT32_C(0xffff0000) | pNewTSS16->ax;
    2617         uNewEcx       = UINT32_C(0xffff0000) | pNewTSS16->cx;
    2618         uNewEdx       = UINT32_C(0xffff0000) | pNewTSS16->dx;
    2619         uNewEbx       = UINT32_C(0xffff0000) | pNewTSS16->bx;
    2620         uNewEsp       = UINT32_C(0xffff0000) | pNewTSS16->sp;
    2621         uNewEbp       = UINT32_C(0xffff0000) | pNewTSS16->bp;
    2622         uNewEsi       = UINT32_C(0xffff0000) | pNewTSS16->si;
    2623         uNewEdi       = UINT32_C(0xffff0000) | pNewTSS16->di;
    2624         uNewES        = pNewTSS16->es;
    2625         uNewCS        = pNewTSS16->cs;
    2626         uNewSS        = pNewTSS16->ss;
    2627         uNewDS        = pNewTSS16->ds;
     2623        uNewEip       = pNewTss16->ip;
     2624        uNewEflags    = pNewTss16->flags;
     2625        uNewEax       = UINT32_C(0xffff0000) | pNewTss16->ax;
     2626        uNewEcx       = UINT32_C(0xffff0000) | pNewTss16->cx;
     2627        uNewEdx       = UINT32_C(0xffff0000) | pNewTss16->dx;
     2628        uNewEbx       = UINT32_C(0xffff0000) | pNewTss16->bx;
     2629        uNewEsp       = UINT32_C(0xffff0000) | pNewTss16->sp;
     2630        uNewEbp       = UINT32_C(0xffff0000) | pNewTss16->bp;
     2631        uNewEsi       = UINT32_C(0xffff0000) | pNewTss16->si;
     2632        uNewEdi       = UINT32_C(0xffff0000) | pNewTss16->di;
     2633        uNewES        = pNewTss16->es;
     2634        uNewCS        = pNewTss16->cs;
     2635        uNewSS        = pNewTss16->ss;
     2636        uNewDS        = pNewTss16->ds;
    26282637        uNewFS        = 0;
    26292638        uNewGS        = 0;
    2630         uNewLdt       = pNewTSS16->selLdt;
     2639        uNewLdt       = pNewTss16->selLdt;
    26312640        fNewDebugTrap = false;
    26322641    }
    26332642
    2634     if (GCPtrNewTSS == GCPtrCurTSS)
     2643    if (GCPtrNewTss == GCPtrCurTss)
    26352644        Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
    26362645             uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
     
    26392648     * We're done accessing the new TSS.
    26402649     */
    2641     rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
     2650    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
    26422651    if (rcStrict != VINF_SUCCESS)
    26432652    {
     
    26512660    if (enmTaskSwitch != IEMTASKSWITCH_IRET)
    26522661    {
    2653         rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
    2654                              pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
     2662        rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
     2663                             pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
    26552664        if (rcStrict != VINF_SUCCESS)
    26562665        {
     
    26612670
    26622671        /* Check that the descriptor indicates the new TSS is available (not busy). */
    2663         AssertMsg(   pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
    2664                   || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
    2665                      ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
    2666 
    2667         pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
    2668         rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
     2672        AssertMsg(   pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
     2673                  || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
     2674                  ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
     2675
     2676        pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
     2677        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
    26692678        if (rcStrict != VINF_SUCCESS)
    26702679        {
     
    26792688     * until the completion of the task switch but before executing any instructions in the new task.
    26802689     */
    2681     pVCpu->cpum.GstCtx.tr.Sel      = SelTSS;
    2682     pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
     2690    pVCpu->cpum.GstCtx.tr.Sel      = SelTss;
     2691    pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
    26832692    pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    2684     pVCpu->cpum.GstCtx.tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
    2685     pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
    2686     pVCpu->cpum.GstCtx.tr.u64Base  = X86DESC_BASE(&pNewDescTSS->Legacy);
     2693    pVCpu->cpum.GstCtx.tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
     2694    pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
     2695    pVCpu->cpum.GstCtx.tr.u64Base  = X86DESC_BASE(&pNewDescTss->Legacy);
    26872696    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
    26882697
     
    27582767     * Switch CR3 for the new task.
    27592768     */
    2760     if (   fIsNewTSS386
     2769    if (   fIsNewTss386
    27612770        && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
    27622771    {
     
    30003009
    30013010    /** @todo Debug trap. */
    3002     if (fIsNewTSS386 && fNewDebugTrap)
     3011    if (fIsNewTss386 && fNewDebugTrap)
    30033012        Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
    30043013
     
    30223031        Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
    30233032        uint32_t      cbLimitSS    = X86DESC_LIMIT_G(&DescSS.Legacy);
    3024         uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
     3033        uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
    30253034
    30263035        /* Check that there is sufficient space on the stack. */
     
    30503059
    30513060
    3052         if (fIsNewTSS386)
     3061        if (fIsNewTss386)
    30533062            rcStrict = iemMemStackPushU32(pVCpu, uErr);
    30543063        else
     
    30573066        {
    30583067            Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
    3059                  fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
     3068                 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
    30603069            return rcStrict;
    30613070        }
     
    32013210                                   && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
    32023211        uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
    3203         RTSEL          SelTSS   = Idte.Gate.u16Sel;
     3212        RTSEL          SelTss   = Idte.Gate.u16Sel;
    32043213
    32053214        /*
     
    32073216         */
    32083217        IEMSELDESC DescTSS;
    3209         rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
     3218        rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
    32103219        if (rcStrict != VINF_SUCCESS)
    32113220        {
    3212             Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
     3221            Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
    32133222                 VBOXSTRICTRC_VAL(rcStrict)));
    32143223            return rcStrict;
     
    32213230        {
    32223231            Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
    3223                  u8Vector, SelTSS, DescTSS.Legacy.au64));
    3224             return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
     3232                 u8Vector, SelTss, DescTSS.Legacy.au64));
     3233            return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
    32253234        }
    32263235
     
    32283237        if (!DescTSS.Legacy.Gen.u1Present)
    32293238        {
    3230             Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
    3231             return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
     3239            Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
     3240            return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
    32323241        }
    32333242
     
    32353244        return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
    32363245                             (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
    3237                              fFlags, uErr, uCr2, SelTSS, &DescTSS);
     3246                             fFlags, uErr, uCr2, SelTss, &DescTSS);
    32383247    }
    32393248
     
    33843393
    33853394        /* Create the stack frame. */
     3395        uint8_t    bUnmapInfoStackFrame;
    33863396        RTPTRUNION uStackFrame;
    3387         rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
     3397        rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
    33883398                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
    33893399                             IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
    33903400        if (rcStrict != VINF_SUCCESS)
    33913401            return rcStrict;
    3392         void * const pvStackFrame = uStackFrame.pv;
    33933402        if (f32BitGate)
    33943403        {
     
    34293438            }
    34303439        }
    3431         rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
     3440        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
    34323441        if (rcStrict != VINF_SUCCESS)
    34333442            return rcStrict;
     
    34873496    {
    34883497        uint64_t        uNewRsp;
     3498        uint8_t         bUnmapInfoStackFrame;
    34893499        RTPTRUNION      uStackFrame;
    34903500        uint8_t const   cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
    3491         rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
     3501        rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
     3502                                               &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
    34923503        if (rcStrict != VINF_SUCCESS)
    34933504            return rcStrict;
    3494         void * const pvStackFrame = uStackFrame.pv;
    34953505
    34963506        if (f32BitGate)
     
    35103520            uStackFrame.pu16[2] = fEfl;
    35113521        }
    3512         rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
     3522        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
    35133523        if (rcStrict != VINF_SUCCESS)
    35143524            return rcStrict;
     
    37473757
    37483758    /* Create the stack frame. */
     3759    uint8_t    bUnmapInfoStackFrame;
    37493760    uint32_t   cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
    37503761    RTPTRUNION uStackFrame;
    3751     rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
     3762    rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
    37523763                         uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
    37533764    if (rcStrict != VINF_SUCCESS)
    37543765        return rcStrict;
    3755     void * const pvStackFrame = uStackFrame.pv;
    37563766
    37573767    if (fFlags & IEM_XCPT_FLAGS_ERR)
     
    37623772    uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
    37633773    uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
    3764     rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
     3774    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
    37653775    if (rcStrict != VINF_SUCCESS)
    37663776        return rcStrict;
     
    60316041 */
    60326042static VBOXSTRICTRC
    6033 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
     6043iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
     6044                               size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
    60346045{
    60356046    Assert(cbMem <= GUEST_PAGE_SIZE);
     
    61586169    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    61596170    *ppvMem = pbBuf;
     6171    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    61606172    return VINF_SUCCESS;
    61616173}
     
     61656177 * iemMemMap worker that deals with iemMemPageMap failures.
    61666178 */
    6167 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
     6179static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    61686180                                              RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
    61696181{
     
    62446256    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    62456257    *ppvMem = pbBuf;
     6258    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    62466259    return VINF_SUCCESS;
    62476260}
     
    62636276 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    62646277 * @param   ppvMem      Where to return the pointer to the mapped memory.
      6278 * @param   pbUnmapInfo Where to return unmap info to be passed to
     6279 *                      iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
     6280 *                      done.
    62656281 * @param   cbMem       The number of bytes to map.  This is usually 1, 2, 4, 6,
    62666282 *                      8, 12, 16, 32 or 512.  When used by string operations
     
    62826298 *                      Pass zero to skip alignment.
    62836299 */
    6284 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
     6300VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
    62856301                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
    62866302{
     
    63196335    { /* likely */ }
    63206336    else
    6321         return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
     6337        return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
    63226338
    63236339    /*
     
    64846500    { /* probably likely */ }
    64856501    else
    6486         return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
     6502        return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
    64876503                                         pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
    64886504                                           pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
     
    65036519        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    65046520        if (rcStrict != VINF_SUCCESS)
    6505             return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
     6521            return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
    65066522    }
    65076523
     
    65286544    rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    65296545    if (rcStrict != VINF_SUCCESS)
    6530         return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
     6546        return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
    65316547
    65326548#endif /* !IEM_WITH_DATA_TLB */
     
    65426558    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    65436559    *ppvMem = pvMem;
     6560    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
     6561    AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
     6562    AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
    65446563
    65456564    return VINF_SUCCESS;
     
    65526571 * @returns Strict VBox status code.
    65536572 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6554  * @param   pvMem               The mapping.
    6555  * @param   fAccess             The kind of access.
    6556  */
    6557 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
    6558 {
    6559     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    6560     AssertReturn(iMemMap >= 0, iMemMap);
     6573 * @param   bUnmapInfo          Unmap info set by iemMemMap.
     6574 */
     6575VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
     6576{
     6577    uintptr_t const iMemMap = bUnmapInfo & 0x7;
     6578    AssertMsgReturn(   (bUnmapInfo & 0x08)
     6579                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
     6580                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
     6581                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
     6582                    VERR_NOT_FOUND);
    65616583
    65626584    /* If it's bounce buffered, we may need to write back the buffer. */
     
    65826604 *
    65836605 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6584  * @param   pvMem               The mapping.
    6585  * @param   fAccess             The kind of access.
    6586  */
    6587 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
    6588 {
    6589     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    6590     AssertReturnVoid(iMemMap >= 0);
     6606 * @param   bUnmapInfo          Unmap info set by iemMemMap.
     6607 */
     6608void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
     6609{
     6610    uintptr_t const iMemMap = bUnmapInfo & 0x7;
     6611    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
     6612                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
     6613                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
     6614                           == ((unsigned)bUnmapInfo >> 4),
     6615                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    65916616
    65926617    /* Unlock it if necessary. */
     
    66166641 *
    66176642 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
      6643 * @param   pbUnmapInfo Where to return unmap info to be passed to
     6644 *                      iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
     6645 *                      iemMemCommitAndUnmapWoSafeJmp,
     6646 *                      iemMemCommitAndUnmapRoSafeJmp,
     6647 *                      iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
     6648 *                      when done.
    66186649 * @param   cbMem       The number of bytes to map.  This is usually 1,
    66196650 *                      2, 4, 6, 8, 12, 16, 32 or 512.  When used by
     
    66366667 *                      Pass zero to skip alignment.
    66376668 */
    6638 void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
     6669void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
    66396670                   uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
    66406671{
     
    67056736    {
    67066737        void *pvMem;
    6707         rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
     6738        rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
    67086739        if (rcStrict == VINF_SUCCESS)
    67096740            return pvMem;
     
    68426873        else
    68436874        {
    6844             rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
     6875            rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
    68456876                                                 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
    68466877                                                   pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
     
    68666897        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    68676898        if (rcStrict == VINF_SUCCESS)
     6899        {
     6900            *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    68686901            return pbMem;
     6902        }
    68696903        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    68706904    }
     
    68966930    else
    68976931    {
    6898         rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
     6932        rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
    68996933        if (rcStrict == VINF_SUCCESS)
    69006934            return pvMem;
     
    69136947
    69146948    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
     6949
     6950    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    69156951    return pvMem;
    69166952}
     
     69246960 * @param   bUnmapInfo          Unmap info set by iemMemMap.
    69256961 */
    6926 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
    6927 {
    6928     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    6929     AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
     6962void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     6963{
     6964    uintptr_t const iMemMap = bUnmapInfo & 0x7;
     6965    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
     6966                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
     6967                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
     6968                           == ((unsigned)bUnmapInfo >> 4),
     6969                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    69306970
    69316971    /* If it's bounce buffered, we may need to write back the buffer. */
     
    69526992
    69536993/** Fallback for iemMemCommitAndUnmapRwJmp.  */
    6954 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    6955 {
    6956     Assert(bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);
    6957     iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);
     6994void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     6995{
     6996    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
     6997    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    69586998}
    69596999
    69607000
    69617001/** Fallback for iemMemCommitAndUnmapWoJmp.  */
    6962 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    6963 {
    6964     Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
    6965     iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);
     7002void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     7003{
     7004    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
     7005    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    69667006}
    69677007
    69687008
    69697009/** Fallback for iemMemCommitAndUnmapRoJmp.  */
    6970 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    6971 {
    6972     Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
    6973     iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
     7010void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     7011{
     7012    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
     7013    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    69747014}
    69757015
    69767016
    69777017/** Fallback for iemMemRollbackAndUnmapWo.  */
    6978 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
    6979 {
    6980     Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
    6981     iemMemRollbackAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
     7018void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
     7019{
     7020    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
     7021    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    69827022}
    69837023
     
     69987038 * @param   bUnmapInfo          Unmap info set by iemMemMap.
    69997039 */
    7000 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
    7001 {
    7002     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    7003     AssertReturn(iMemMap >= 0, iMemMap);
     7040VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
     7041{
     7042    uintptr_t const iMemMap = bUnmapInfo & 0x7;
     7043    AssertMsgReturn(   (bUnmapInfo & 0x08)
     7044                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
     7045                    &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
     7046                       == ((unsigned)bUnmapInfo >> 4),
     7047                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
     7048                    VERR_NOT_FOUND);
    70047049
    70057050    /* If it's bounce buffered, we may need to write back the buffer. */
     
    71317176{
    71327177    /* The lazy approach for now... */
     7178    uint8_t         bUnmapInfo;
    71337179    uint32_t const *pu32Src;
    7134     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
     7180    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
    71357181                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
    71367182    if (rc == VINF_SUCCESS)
    71377183    {
    71387184        *pu64Dst = *pu32Src;
    7139         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
     7185        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    71407186        Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
    71417187    }
     
    71587204{
    71597205    /* The lazy approach for now... */
     7206    uint8_t        bUnmapInfo;
    71607207    int32_t const *pi32Src;
    7161     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
     7208    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
    71627209                                IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
    71637210    if (rc == VINF_SUCCESS)
    71647211    {
    71657212        *pu64Dst = *pi32Src;
    7166         rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
     7213        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    71677214        Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
    71687215    }
     
    71927239{
    71937240    /* The lazy approach for now... */
     7241    uint8_t      bUnmapInfo;
    71947242    PCRTUINT128U pu128Src;
    7195     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
     7243    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
    71967244                                IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    71977245    if (rc == VINF_SUCCESS)
     
    71997247        pu128Dst->au64[0] = pu128Src->au64[0];
    72007248        pu128Dst->au64[1] = pu128Src->au64[1];
    7201         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
     7249        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    72027250        Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    72037251    }
     
    72237271{
    72247272    /* The lazy approach for now... */
    7225     PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
     7273    uint8_t      bUnmapInfo;
     7274    PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
    72267275                                                       (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    72277276    pu128Dst->au64[0] = pu128Src->au64[0];
    72287277    pu128Dst->au64[1] = pu128Src->au64[1];
    7229     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
     7278    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    72307279    Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    72317280}
     
    72467295{
    72477296    /* The lazy approach for now... */
     7297    uint8_t      bUnmapInfo;
    72487298    PCRTUINT256U pu256Src;
    7249     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
     7299    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
    72507300                                IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
    72517301    if (rc == VINF_SUCCESS)
     
    72557305        pu256Dst->au64[2] = pu256Src->au64[2];
    72567306        pu256Dst->au64[3] = pu256Src->au64[3];
    7257         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
     7307        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    72587308        Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    72597309    }
     
    72757325{
    72767326    /* The lazy approach for now... */
    7277     PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
     7327    uint8_t      bUnmapInfo;
     7328    PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
    72787329                                                       IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
    72797330    pu256Dst->au64[0] = pu256Src->au64[0];
     
    72817332    pu256Dst->au64[2] = pu256Src->au64[2];
    72827333    pu256Dst->au64[3] = pu256Src->au64[3];
    7283     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
     7334    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    72847335    Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    72857336}
     
    73037354{
    73047355    /* The lazy approach for now... */
     7356    uint8_t      bUnmapInfo;
    73057357    PCRTUINT256U pu256Src;
    7306     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
     7358    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
    73077359                                IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    73087360    if (rc == VINF_SUCCESS)
     
    73127364        pu256Dst->au64[2] = pu256Src->au64[2];
    73137365        pu256Dst->au64[3] = pu256Src->au64[3];
    7314         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
     7366        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    73157367        Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    73167368    }
     
    73367388{
    73377389    /* The lazy approach for now... */
    7338     PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
     7390    uint8_t      bUnmapInfo;
     7391    PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
    73397392                                                       (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    73407393    pu256Dst->au64[0] = pu256Src->au64[0];
     
    73427395    pu256Dst->au64[2] = pu256Src->au64[2];
    73437396    pu256Dst->au64[3] = pu256Src->au64[3];
    7344     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
     7397    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    73457398    Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    73467399}
     
    74337486{
    74347487    /* The lazy approach for now... */
    7435     PRTUINT128U pu128Dst;
    7436     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
     7488    uint8_t      bUnmapInfo;
     7489    PRTUINT128U  pu128Dst;
     7490    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
    74377491                                (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    74387492    if (rc == VINF_SUCCESS)
     
    74407494        pu128Dst->au64[0] = u128Value.au64[0];
    74417495        pu128Dst->au64[1] = u128Value.au64[1];
    7442         rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
     7496        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    74437497        Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    74447498    }
     
    74627516{
    74637517    /* The lazy approach for now... */
    7464     PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
     7518    uint8_t     bUnmapInfo;
     7519    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
    74657520                                                     (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    74667521    pu128Dst->au64[0] = u128Value.au64[0];
    74677522    pu128Dst->au64[1] = u128Value.au64[1];
    7468     iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
     7523    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    74697524    Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    74707525}
     
    74857540{
    74867541    /* The lazy approach for now... */
    7487     PRTUINT256U pu256Dst;
    7488     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
     7542    uint8_t      bUnmapInfo;
     7543    PRTUINT256U  pu256Dst;
     7544    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
    74897545                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    74907546    if (rc == VINF_SUCCESS)
     
    74947550        pu256Dst->au64[2] = pu256Value->au64[2];
    74957551        pu256Dst->au64[3] = pu256Value->au64[3];
    7496         rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
     7552        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    74977553        Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    74987554    }
     
    75147570{
    75157571    /* The lazy approach for now... */
    7516     PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
     7572    uint8_t     bUnmapInfo;
     7573    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
    75177574                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    75187575    pu256Dst->au64[0] = pu256Value->au64[0];
     
    75207577    pu256Dst->au64[2] = pu256Value->au64[2];
    75217578    pu256Dst->au64[3] = pu256Value->au64[3];
    7522     iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
     7579    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    75237580    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    75247581}
     
    75397596{
    75407597    /* The lazy approach for now... */
    7541     PRTUINT256U pu256Dst;
    7542     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
     7598    uint8_t      bUnmapInfo;
     7599    PRTUINT256U  pu256Dst;
     7600    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
    75437601                                IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
    75447602    if (rc == VINF_SUCCESS)
     
    75487606        pu256Dst->au64[2] = pu256Value->au64[2];
    75497607        pu256Dst->au64[3] = pu256Value->au64[3];
    7550         rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
     7608        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    75517609        Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    75527610    }
     
    75707628{
    75717629    /* The lazy approach for now... */
    7572     PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
     7630    uint8_t     bUnmapInfo;
     7631    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
    75737632                                                     IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
    75747633    pu256Dst->au64[0] = pu256Value->au64[0];
     
    75767635    pu256Dst->au64[2] = pu256Value->au64[2];
    75777636    pu256Dst->au64[3] = pu256Value->au64[3];
    7578     iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
     7637    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    75797638    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    75807639}
     
    76307689 *                              don't commit register until the commit call
    76317690 *                              succeeds.
     7691 * @param   pbUnmapInfo         Where to store unmap info for
     7692 *                              iemMemStackPushCommitSpecial.
    76327693 * @param   puNewRsp            Where to return the new RSP value.  This must be
    76337694 *                              passed unchanged to
     
    76357696 */
    76367697VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    7637                                          void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
     7698                                         void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
    76387699{
    76397700    Assert(cbMem < UINT8_MAX);
    7640     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
    7641     return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
    7642                      IEM_ACCESS_STACK_W, cbAlign);
     7701    RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
     7702    return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
    76437703}
    76447704
     
    76567716 *                              iemMemStackPushBeginSpecial().
    76577717 */
    7658 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
    7659 {
    7660     VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
     7718VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
     7719{
     7720    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    76617721    if (rcStrict == VINF_SUCCESS)
    76627722        pVCpu->cpum.GstCtx.rsp = uNewRsp;
     
    76757735 * @param   cbAlign             The alignment mask (7, 3, 1).
    76767736 * @param   ppvMem              Where to return the pointer to the stack memory.
     7737 * @param   pbUnmapInfo         Where to store unmap info for
     7738 *                              iemMemStackPopDoneSpecial.
    76777739 * @param   puNewRsp            Where to return the new RSP value.  This must be
    76787740 *                              assigned to CPUMCTX::rsp manually some time
     
    76817743 */
    76827744VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    7683                                         void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
     7745                                        void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
    76847746{
    76857747    Assert(cbMem < UINT8_MAX);
    76867748    RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
    7687     return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
     7749    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
    76887750}
    76897751
     
    77017763 * @param   cbMem               The number of bytes to pop from the stack.
    77027764 * @param   ppvMem              Where to return the pointer to the stack memory.
     7765 * @param   pbUnmapInfo         Where to store unmap info for
     7766 *                              iemMemStackPopDoneSpecial.
    77037767 * @param   uCurNewRsp          The current uncommitted RSP value.  (No need to
    77047768 *                              return this because all use of this function is
     
    77077771 */
    77087772VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
    7709                                            void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
     7773                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
    77107774{
    77117775    Assert(cbMem < UINT8_MAX);
     
    77207784        GCPtrTop = (uint16_t)uCurNewRsp;
    77217785
    7722     return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
     7786    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
    77237787                     0 /* checked in iemMemStackPopBeginSpecial */);
    77247788}
     
    77337797 * @returns Strict VBox status code.
    77347798 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7735  * @param   pvMem               The pointer returned by
     7799 * @param   bUnmapInfo          Unmap information returned by
    77367800 *                              iemMemStackPopBeginSpecial() or
    77377801 *                              iemMemStackPopContinueSpecial().
    77387802 */
    7739 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
    7740 {
    7741     return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
     7803VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
     7804{
     7805    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    77427806}
    77437807
     
    77567820{
    77577821    /* The lazy approach for now... */
     7822    uint8_t        bUnmapInfo;
    77587823    uint8_t const *pbSrc;
    7759     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     7824    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    77607825    if (rc == VINF_SUCCESS)
    77617826    {
    77627827        *pbDst = *pbSrc;
    7763         rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
     7828        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    77647829    }
    77657830    return rc;
     
    77807845{
    77817846    /* The lazy approach for now... */
     7847    uint8_t         bUnmapInfo;
    77827848    uint16_t const *pu16Src;
    7783     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     7849    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    77847850    if (rc == VINF_SUCCESS)
    77857851    {
    77867852        *pu16Dst = *pu16Src;
    7787         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
     7853        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    77887854    }
    77897855    return rc;
     
    78047870{
    78057871    /* The lazy approach for now... */
     7872    uint8_t         bUnmapInfo;
    78067873    uint32_t const *pu32Src;
    7807     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     7874    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    78087875    if (rc == VINF_SUCCESS)
    78097876    {
    78107877        *pu32Dst = *pu32Src;
    7811         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
     7878        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    78127879    }
    78137880    return rc;
     
    78287895{
    78297896    /* The lazy approach for now... */
     7897    uint8_t         bUnmapInfo;
    78307898    uint64_t const *pu64Src;
    7831     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
     7899    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    78327900    if (rc == VINF_SUCCESS)
    78337901    {
    78347902        *pu64Dst = *pu64Src;
    7835         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
     7903        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    78367904    }
    78377905    return rc;
     
    79658033     */
    79668034    VBOXSTRICTRC        rcStrict;
     8035    uint8_t             bUnmapInfo;
    79678036    uint32_t volatile  *pu32;
    79688037    if ((GCPtr & 3) == 0)
     
    79708039        /* The normal case, map the 32-bit bits around the accessed bit (40). */
    79718040        GCPtr += 2 + 2;
    7972         rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
     8041        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
    79738042        if (rcStrict != VINF_SUCCESS)
    79748043            return rcStrict;
     
    79788047    {
    79798048        /* The misaligned GDT/LDT case, map the whole thing. */
    7980         rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
     8049        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
    79818050        if (rcStrict != VINF_SUCCESS)
    79828051            return rcStrict;
     
    79908059    }
    79918060
    7992     return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
     8061    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    79938062}
    79948063
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r101450 r102430  
    432432    else
    433433    {
    434         uint16_t const *pa16Mem = NULL;
    435         rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1);
     434        uint8_t         bUnmapInfo;
     435        uint16_t const *pau16Mem = NULL;
     436        rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrStart,
     437                             IEM_ACCESS_STACK_R, sizeof(*pau16Mem) - 1);
    436438        if (rcStrict == VINF_SUCCESS)
    437439        {
    438             pVCpu->cpum.GstCtx.di = pa16Mem[7 - X86_GREG_xDI];
    439             pVCpu->cpum.GstCtx.si = pa16Mem[7 - X86_GREG_xSI];
    440             pVCpu->cpum.GstCtx.bp = pa16Mem[7 - X86_GREG_xBP];
     440            pVCpu->cpum.GstCtx.di = pau16Mem[7 - X86_GREG_xDI];
     441            pVCpu->cpum.GstCtx.si = pau16Mem[7 - X86_GREG_xSI];
     442            pVCpu->cpum.GstCtx.bp = pau16Mem[7 - X86_GREG_xBP];
    441443            /* skip sp */
    442             pVCpu->cpum.GstCtx.bx = pa16Mem[7 - X86_GREG_xBX];
    443             pVCpu->cpum.GstCtx.dx = pa16Mem[7 - X86_GREG_xDX];
    444             pVCpu->cpum.GstCtx.cx = pa16Mem[7 - X86_GREG_xCX];
    445             pVCpu->cpum.GstCtx.ax = pa16Mem[7 - X86_GREG_xAX];
    446             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
     444            pVCpu->cpum.GstCtx.bx = pau16Mem[7 - X86_GREG_xBX];
     445            pVCpu->cpum.GstCtx.dx = pau16Mem[7 - X86_GREG_xDX];
     446            pVCpu->cpum.GstCtx.cx = pau16Mem[7 - X86_GREG_xCX];
     447            pVCpu->cpum.GstCtx.ax = pau16Mem[7 - X86_GREG_xAX];
     448            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    447449            if (rcStrict == VINF_SUCCESS)
    448450            {
     
    512514    else
    513515    {
    514         uint32_t const *pa32Mem;
    515         rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1);
     516        uint8_t         bUnmapInfo;
     517        uint32_t const *pau32Mem;
     518        rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrStart,
     519                             IEM_ACCESS_STACK_R, sizeof(*pau32Mem) - 1);
    516520        if (rcStrict == VINF_SUCCESS)
    517521        {
    518             pVCpu->cpum.GstCtx.rdi = pa32Mem[7 - X86_GREG_xDI];
    519             pVCpu->cpum.GstCtx.rsi = pa32Mem[7 - X86_GREG_xSI];
    520             pVCpu->cpum.GstCtx.rbp = pa32Mem[7 - X86_GREG_xBP];
     522            pVCpu->cpum.GstCtx.rdi = pau32Mem[7 - X86_GREG_xDI];
     523            pVCpu->cpum.GstCtx.rsi = pau32Mem[7 - X86_GREG_xSI];
     524            pVCpu->cpum.GstCtx.rbp = pau32Mem[7 - X86_GREG_xBP];
    521525            /* skip esp */
    522             pVCpu->cpum.GstCtx.rbx = pa32Mem[7 - X86_GREG_xBX];
    523             pVCpu->cpum.GstCtx.rdx = pa32Mem[7 - X86_GREG_xDX];
    524             pVCpu->cpum.GstCtx.rcx = pa32Mem[7 - X86_GREG_xCX];
    525             pVCpu->cpum.GstCtx.rax = pa32Mem[7 - X86_GREG_xAX];
    526             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
     526            pVCpu->cpum.GstCtx.rbx = pau32Mem[7 - X86_GREG_xBX];
     527            pVCpu->cpum.GstCtx.rdx = pau32Mem[7 - X86_GREG_xDX];
     528            pVCpu->cpum.GstCtx.rcx = pau32Mem[7 - X86_GREG_xCX];
     529            pVCpu->cpum.GstCtx.rax = pau32Mem[7 - X86_GREG_xAX];
     530            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    527531            if (rcStrict == VINF_SUCCESS)
    528532            {
     
    583587    {
    584588        GCPtrBottom--;
    585         uint16_t *pa16Mem = NULL;
    586         rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1);
     589        uint8_t   bUnmapInfo;
     590        uint16_t *pau16Mem = NULL;
     591        rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrBottom,
     592                             IEM_ACCESS_STACK_W, sizeof(*pau16Mem) - 1);
    587593        if (rcStrict == VINF_SUCCESS)
    588594        {
    589             pa16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
    590             pa16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
    591             pa16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
    592             pa16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
    593             pa16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
    594             pa16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
    595             pa16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
    596             pa16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
    597             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
     595            pau16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
     596            pau16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
     597            pau16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
     598            pau16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
     599            pau16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
     600            pau16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
     601            pau16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
     602            pau16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
     603            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    598604            if (rcStrict == VINF_SUCCESS)
    599605            {
     
    654660    {
    655661        GCPtrBottom--;
    656         uint32_t *pa32Mem;
    657         rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1);
     662        uint8_t   bUnmapInfo;
     663        uint32_t *pau32Mem;
     664        rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrBottom,
     665                             IEM_ACCESS_STACK_W, sizeof(*pau32Mem) - 1);
    658666        if (rcStrict == VINF_SUCCESS)
    659667        {
    660             pa32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
    661             pa32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
    662             pa32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
    663             pa32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
    664             pa32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
    665             pa32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
    666             pa32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
    667             pa32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
    668             rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);
     668            pau32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
     669            pau32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
     670            pau32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
     671            pau32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
     672            pau32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
     673            pau32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
     674            pau32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
     675            pau32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
     676            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    669677            if (rcStrict == VINF_SUCCESS)
    670678            {
     
    13791387        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu)))
    13801388        {
    1381             uint16_t    offNewStack;    /* Offset of new stack in TSS. */
    1382             uint16_t    cbNewStack;     /* Number of bytes the stack information takes up in TSS. */
    1383             uint8_t     uNewCSDpl;
    1384             uint8_t     cbWords;
    1385             RTSEL       uNewSS;
    1386             RTSEL       uOldSS;
    1387             uint64_t    uOldRsp;
    1388             IEMSELDESC  DescSS;
    1389             RTPTRUNION  uPtrTSS;
    1390             RTGCPTR     GCPtrTSS;
    1391             RTPTRUNION  uPtrParmWds;
    1392             RTGCPTR     GCPtrParmWds;
    1393 
    13941389            /* More privilege. This is the fun part. */
    13951390            Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF));    /* Filtered out above. */
     
    14011396
    14021397            /* Figure out where the new stack pointer is stored in the TSS. */
    1403             uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
     1398            uint8_t const uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
     1399            uint16_t      offNewStack;    /* Offset of new stack in TSS. */
     1400            uint16_t      cbNewStack;     /* Number of bytes the stack information takes up in TSS. */
    14041401            if (!IEM_IS_LONG_MODE(pVCpu))
    14051402            {
     
    14301427            }
    14311428
    1432             GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
    1433             rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0);
     1429            uint8_t     bUnmapInfo;
     1430            RTPTRUNION  uPtrTss;
     1431            RTGCPTR     GCPtrTss = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
     1432            rcStrict = iemMemMap(pVCpu, &uPtrTss.pv, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrTss, IEM_ACCESS_SYS_R, 0);
    14341433            if (rcStrict != VINF_SUCCESS)
    14351434            {
     
    14381437            }
    14391438
     1439            RTSEL       uNewSS;
    14401440            if (!IEM_IS_LONG_MODE(pVCpu))
    14411441            {
    14421442                if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
    14431443                {
    1444                     uNewRsp = uPtrTSS.pu32[0];
    1445                     uNewSS  = uPtrTSS.pu16[2];
     1444                    uNewRsp = uPtrTss.pu32[0];
     1445                    uNewSS  = uPtrTss.pu16[2];
    14461446                }
    14471447                else
    14481448                {
    14491449                    Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
    1450                     uNewRsp = uPtrTSS.pu16[0];
    1451                     uNewSS  = uPtrTSS.pu16[1];
     1450                    uNewRsp = uPtrTss.pu16[0];
     1451                    uNewSS  = uPtrTss.pu16[1];
    14521452                }
    14531453            }
     
    14561456                Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
    14571457                /* SS will be a NULL selector, but that's valid. */
    1458                 uNewRsp = uPtrTSS.pu64[0];
     1458                uNewRsp = uPtrTss.pu64[0];
    14591459                uNewSS  = uNewCSDpl;
    14601460            }
    14611461
    14621462            /* Done with the TSS now. */
    1463             rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);
     1463            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    14641464            if (rcStrict != VINF_SUCCESS)
    14651465            {
     
    14691469
    14701470            /* Only used outside of long mode. */
    1471             cbWords = pDesc->Legacy.Gate.u5ParmCount;
     1471            uint8_t const cbWords = pDesc->Legacy.Gate.u5ParmCount;
    14721472
    14731473            /* If EFER.LMA is 0, there's extra work to do. */
     1474            IEMSELDESC DescSS;
    14741475            if (!IEM_IS_LONG_MODE(pVCpu))
    14751476            {
     
    15481549
    15491550            /* Remember the old SS:rSP and their linear address. */
    1550             uOldSS  = pVCpu->cpum.GstCtx.ss.Sel;
    1551             uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
    1552 
    1553             GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
     1551            RTSEL const    uOldSS  = pVCpu->cpum.GstCtx.ss.Sel;
     1552            uint64_t const uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
     1553
     1554            RTGCPTR const GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
    15541555
    15551556            /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
     
    15601561            void    *pvNewFrame;
    15611562            RTGCPTR  GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
    1562             rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
     1563            rcStrict = iemMemMap(pVCpu, &pvNewFrame, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
    15631564            if (rcStrict != VINF_SUCCESS)
    15641565            {
     
    15661567                return rcStrict;
    15671568            }
    1568             rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);
     1569            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    15691570            if (rcStrict != VINF_SUCCESS)
    15701571            {
     
    15871588            /* At this point the stack access must not fail because new state was already committed. */
    15881589            /** @todo this can still fail due to SS.LIMIT not check.   */
     1590            uint8_t bUnmapInfoRet;
    15891591            rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
    15901592                                                   IEM_IS_LONG_MODE(pVCpu) ? 7
    15911593                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
    1592                                                    &uPtrRet.pv, &uNewRsp);
     1594                                                   &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
    15931595            AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
    15941596                            VERR_INTERNAL_ERROR_5);
     
    16011603                    {
    16021604                        /* Map the relevant chunk of the old stack. */
    1603                         rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds,
     1605                        RTPTRUNION uPtrParmWds;
     1606                        rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 4, UINT8_MAX, GCPtrParmWds,
    16041607                                             IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
    16051608                        if (rcStrict != VINF_SUCCESS)
     
    16141617
    16151618                        /* Unmap the old stack. */
    1616                         rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
     1619                        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    16171620                        if (rcStrict != VINF_SUCCESS)
    16181621                        {
     
    16371640                    {
    16381641                        /* Map the relevant chunk of the old stack. */
    1639                         rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds,
     1642                        RTPTRUNION uPtrParmWds;
     1643                        rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 2, UINT8_MAX, GCPtrParmWds,
    16401644                                             IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
    16411645                        if (rcStrict != VINF_SUCCESS)
     
    16501654
    16511655                        /* Unmap the old stack. */
    1652                         rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);
     1656                        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    16531657                        if (rcStrict != VINF_SUCCESS)
    16541658                        {
     
    16781682            }
    16791683
    1680             rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
     1684            rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
    16811685            if (rcStrict != VINF_SUCCESS)
    16821686            {
     
    17441748            /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
    17451749             *        16-bit code cause a two or four byte CS to be pushed? */
     1750            uint8_t bUnmapInfoRet;
    17461751            rcStrict = iemMemStackPushBeginSpecial(pVCpu,
    17471752                                                   IEM_IS_LONG_MODE(pVCpu) ? 8+8
     
    17491754                                                   IEM_IS_LONG_MODE(pVCpu) ? 7
    17501755                                                   : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
    1751                                                    &uPtrRet.pv, &uNewRsp);
     1756                                                   &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
    17521757            if (rcStrict != VINF_SUCCESS)
    17531758                return rcStrict;
     
    18151820            }
    18161821
    1817             rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
     1822            rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
    18181823            if (rcStrict != VINF_SUCCESS)
    18191824                return rcStrict;
     
    21052110    uint64_t        uNewRsp;
    21062111    RTPTRUNION      uPtrRet;
     2112    uint8_t         bUnmapInfo;
    21072113
    21082114    /*
     
    21192125        rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
    21202126                                               enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
    2121                                                &uPtrRet.pv, &uNewRsp);
     2127                                               &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
    21222128        if (rcStrict != VINF_SUCCESS)
    21232129            return rcStrict;
     
    21392145            uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
    21402146        }
    2141         rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
     2147        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    21422148        if (rcStrict != VINF_SUCCESS)
    21432149            return rcStrict;
     
    22282234                                           enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
    22292235                                           enmEffOpSize == IEMMODE_64BIT ? 7   : enmEffOpSize == IEMMODE_32BIT ? 3   : 1,
    2230                                            &uPtrRet.pv, &uNewRsp);
     2236                                           &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
    22312237    if (rcStrict != VINF_SUCCESS)
    22322238        return rcStrict;
     
    22902296        uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
    22912297    }
    2292     rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);
     2298    rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    22932299    if (rcStrict != VINF_SUCCESS)
    22942300        return rcStrict;
     
    23252331IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
    23262332{
    2327     VBOXSTRICTRC    rcStrict;
     2333    NOREF(cbInstr);
     2334
     2335    /*
     2336     * Read the stack values first.
     2337     */
     2338    RTUINT64U       NewRsp;
     2339    uint8_t         bUnmapInfo;
    23282340    RTCPTRUNION     uPtrFrame;
    2329     RTUINT64U       NewRsp;
     2341    uint32_t        cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
     2342                             : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
     2343    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
     2344                                                       enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
     2345                                                       &uPtrFrame.pv, &bUnmapInfo, &NewRsp.u);
     2346    if (rcStrict != VINF_SUCCESS)
     2347        return rcStrict;
     2348
    23302349    uint64_t        uNewRip;
    23312350    uint16_t        uNewCs;
    2332     NOREF(cbInstr);
    2333 
    2334     /*
    2335      * Read the stack values first.
    2336      */
    2337     uint32_t        cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
    2338                              : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
    2339     rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
    2340                                           enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
    2341                                           &uPtrFrame.pv, &NewRsp.u);
    2342     if (rcStrict != VINF_SUCCESS)
    2343         return rcStrict;
    23442351    if (enmEffOpSize == IEMMODE_16BIT)
    23452352    {
     
    23572364        uNewCs  = uPtrFrame.pu16[4];
    23582365    }
    2359     rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
     2366
     2367    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    23602368    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    23612369    { /* extremely likely */ }
     
    24642472    {
    24652473        /* Read the outer stack pointer stored *after* the parameters. */
    2466         rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, NewRsp.u);
     2474        rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, &bUnmapInfo, NewRsp.u);
    24672475        if (rcStrict != VINF_SUCCESS)
    24682476            return rcStrict;
     
    24852493            uNewOuterSs   = uPtrFrame.pu16[4];
    24862494        }
    2487         rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);
     2495        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    24882496        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    24892497        { /* extremely likely */ }
     
    31033111    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
    31043112    VBOXSTRICTRC    rcStrict;
     3113    uint8_t         bUnmapInfo;
    31053114    RTCPTRUNION     uFrame;
    31063115    uint16_t        uNewCs;
     
    31103119    if (enmEffOpSize == IEMMODE_32BIT)
    31113120    {
    3112         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &uNewRsp);
     3121        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    31133122        if (rcStrict != VINF_SUCCESS)
    31143123            return rcStrict;
     
    31293138    else
    31303139    {
    3131         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
     3140        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    31323141        if (rcStrict != VINF_SUCCESS)
    31333142            return rcStrict;
     
    31443153            uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
    31453154    }
    3146     rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);
     3155    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    31473156    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    31483157    { /* extremely likely */ }
     
    32513260     * Pop the V8086 specific frame bits off the stack.
    32523261     */
    3253     VBOXSTRICTRC    rcStrict;
    3254     RTCPTRUNION     uFrame;
    3255     rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);
     3262    uint8_t      bUnmapInfo;
     3263    RTCPTRUNION  uFrame;
     3264    VBOXSTRICTRC rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
    32563265    if (rcStrict != VINF_SUCCESS)
    32573266        return rcStrict;
     
    32623271    uint16_t uNewFs  = uFrame.pu32[4];
    32633272    uint16_t uNewGs  = uFrame.pu32[5];
    3264     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
     3273    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
    32653274    if (rcStrict != VINF_SUCCESS)
    32663275        return rcStrict;
     
    33823391     */
    33833392    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     3393    uint8_t         bUnmapInfo;
    33843394    VBOXSTRICTRC    rcStrict;
    33853395    RTCPTRUNION     uFrame;
     
    33903400    if (enmEffOpSize == IEMMODE_32BIT)
    33913401    {
    3392         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &uNewRsp);
     3402        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    33933403        if (rcStrict != VINF_SUCCESS)
    33943404            return rcStrict;
     
    33993409    else
    34003410    {
    3401         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &uNewRsp);
     3411        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    34023412        if (rcStrict != VINF_SUCCESS)
    34033413            return rcStrict;
     
    34063416        uNewFlags  = uFrame.pu16[2];
    34073417    }
    3408     rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
     3418    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
    34093419    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    34103420    { /* extremely likely */ }
     
    34913501        if (enmEffOpSize == IEMMODE_32BIT)
    34923502        {
    3493             rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);
     3503            rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
    34943504            if (rcStrict != VINF_SUCCESS)
    34953505                return rcStrict;
     
    35023512        else
    35033513        {
    3504             rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);
     3514            rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
    35053515            if (rcStrict != VINF_SUCCESS)
    35063516                return rcStrict;
     
    35083518            uNewSS  = uFrame.pu16[1];
    35093519        }
    3510         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
     3520        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    35113521        if (rcStrict != VINF_SUCCESS)
    35123522            return rcStrict;
     
    37373747     */
    37383748    VBOXSTRICTRC    rcStrict;
     3749    uint8_t         bUnmapInfo;
    37393750    RTCPTRUNION     uFrame;
    37403751    uint64_t        uNewRip;
     
    37453756    if (enmEffOpSize == IEMMODE_64BIT)
    37463757    {
    3747         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &uNewRsp);
     3758        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    37483759        if (rcStrict != VINF_SUCCESS)
    37493760            return rcStrict;
     
    37563767    else if (enmEffOpSize == IEMMODE_32BIT)
    37573768    {
    3758         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &uNewRsp);
     3769        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    37593770        if (rcStrict != VINF_SUCCESS)
    37603771            return rcStrict;
     
    37683779    {
    37693780        Assert(enmEffOpSize == IEMMODE_16BIT);
    3770         rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &uNewRsp);
     3781        rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
    37713782        if (rcStrict != VINF_SUCCESS)
    37723783            return rcStrict;
     
    37773788        uNewSs     = uFrame.pu16[4];
    37783789    }
    3779     rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */
     3790    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
    37803791    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    37813792    { /* extremely like */ }
     
    41524163    }
    41534164
    4154     uint8_t const *pbMem = NULL;
    4155     uint16_t const *pa16Mem;
    4156     uint8_t const *pa8Mem;
    4157     RTGCPHYS GCPtrStart = 0x800;    /* Fixed table location. */
    4158     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
     4165    uint8_t        bUnmapInfo;
     4166    uint8_t const *pbMem      = NULL;
     4167    RTGCPHYS       GCPtrStart = 0x800;    /* Fixed table location. */
     4168    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, &bUnmapInfo, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
    41594169    if (rcStrict != VINF_SUCCESS)
    41604170        return rcStrict;
    41614171
    41624172    /* The MSW is at offset 0x06. */
    4163     pa16Mem = (uint16_t const *)(pbMem + 0x06);
     4173    uint16_t const *pau16Mem = (uint16_t const *)(pbMem + 0x06);
    41644174    /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
    41654175    uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    4166     uNewCr0 |= *pa16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     4176    uNewCr0 |= *pau16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    41674177    uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    41684178
     
    41814191
    41824192    /* TR selector is at offset 0x16. */
    4183     pa16Mem = (uint16_t const *)(pbMem + 0x16);
    4184     pVCpu->cpum.GstCtx.tr.Sel      = pa16Mem[0];
    4185     pVCpu->cpum.GstCtx.tr.ValidSel = pa16Mem[0];
     4193    pau16Mem = (uint16_t const *)(pbMem + 0x16);
     4194    pVCpu->cpum.GstCtx.tr.Sel      = pau16Mem[0];
     4195    pVCpu->cpum.GstCtx.tr.ValidSel = pau16Mem[0];
    41864196    pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    41874197
    41884198    /* Followed by FLAGS... */
    4189     pVCpu->cpum.GstCtx.eflags.u = pa16Mem[1] | X86_EFL_1;
    4190     pVCpu->cpum.GstCtx.ip       = pa16Mem[2];   /* ...and IP. */
     4199    pVCpu->cpum.GstCtx.eflags.u = pau16Mem[1] | X86_EFL_1;
     4200    pVCpu->cpum.GstCtx.ip       = pau16Mem[2];   /* ...and IP. */
    41914201
    41924202    /* LDT is at offset 0x1C. */
    4193     pa16Mem = (uint16_t const *)(pbMem + 0x1C);
    4194     pVCpu->cpum.GstCtx.ldtr.Sel      = pa16Mem[0];
    4195     pVCpu->cpum.GstCtx.ldtr.ValidSel = pa16Mem[0];
     4203    pau16Mem = (uint16_t const *)(pbMem + 0x1C);
     4204    pVCpu->cpum.GstCtx.ldtr.Sel      = pau16Mem[0];
     4205    pVCpu->cpum.GstCtx.ldtr.ValidSel = pau16Mem[0];
    41964206    pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    41974207
    41984208    /* Segment registers are at offset 0x1E. */
    4199     pa16Mem = (uint16_t const *)(pbMem + 0x1E);
    4200     iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa16Mem[0]);
    4201     iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa16Mem[1]);
    4202     iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa16Mem[2]);
    4203     iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa16Mem[3]);
     4209    pau16Mem = (uint16_t const *)(pbMem + 0x1E);
     4210    iemLoadallSetSelector(pVCpu, X86_SREG_DS, pau16Mem[0]);
     4211    iemLoadallSetSelector(pVCpu, X86_SREG_SS, pau16Mem[1]);
     4212    iemLoadallSetSelector(pVCpu, X86_SREG_CS, pau16Mem[2]);
     4213    iemLoadallSetSelector(pVCpu, X86_SREG_ES, pau16Mem[3]);
    42044214
    42054215    /* GPRs are at offset 0x26. */
    4206     pa16Mem = (uint16_t const *)(pbMem + 0x26);
    4207     pVCpu->cpum.GstCtx.di = pa16Mem[0];
    4208     pVCpu->cpum.GstCtx.si = pa16Mem[1];
    4209     pVCpu->cpum.GstCtx.bp = pa16Mem[2];
    4210     pVCpu->cpum.GstCtx.sp = pa16Mem[3];
    4211     pVCpu->cpum.GstCtx.bx = pa16Mem[4];
    4212     pVCpu->cpum.GstCtx.dx = pa16Mem[5];
    4213     pVCpu->cpum.GstCtx.cx = pa16Mem[6];
    4214     pVCpu->cpum.GstCtx.ax = pa16Mem[7];
     4216    pau16Mem = (uint16_t const *)(pbMem + 0x26);
     4217    pVCpu->cpum.GstCtx.di = pau16Mem[0];
     4218    pVCpu->cpum.GstCtx.si = pau16Mem[1];
     4219    pVCpu->cpum.GstCtx.bp = pau16Mem[2];
     4220    pVCpu->cpum.GstCtx.sp = pau16Mem[3];
     4221    pVCpu->cpum.GstCtx.bx = pau16Mem[4];
     4222    pVCpu->cpum.GstCtx.dx = pau16Mem[5];
     4223    pVCpu->cpum.GstCtx.cx = pau16Mem[6];
     4224    pVCpu->cpum.GstCtx.ax = pau16Mem[7];
    42154225
    42164226    /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
     
    42214231
    42224232    /* GDTR contents are at offset 0x4E, 6 bytes. */
    4223     RTGCPHYS GCPtrBase;
    4224     uint16_t cbLimit;
    4225     pa8Mem = pbMem + 0x4E;
     4233    uint8_t const *pau8Mem = pbMem + 0x4E;
    42264234    /* NB: Fourth byte "should be zero"; we are ignoring it. */
    4227     GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
    4228     cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
     4235    RTGCPHYS GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
     4236    uint16_t cbLimit   = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
    42294237    CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
    42304238
    42314239    /* IDTR contents are at offset 0x5A, 6 bytes. */
    4232     pa8Mem = pbMem + 0x5A;
    4233     GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);
    4234     cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);
     4240    pau8Mem  = pbMem + 0x5A;
     4241    GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
     4242    cbLimit   = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
    42354243    CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
    42364244
     
    42424250    Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
    42434251
    4244     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);
     4252    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    42454253    if (rcStrict != VINF_SUCCESS)
    42464254        return rcStrict;
     
    57315739     *       assembly and such.
    57325740     */
    5733     void *pvDesc;
    5734     rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL),
    5735                          IEM_ACCESS_DATA_RW, 0);
     5741    uint8_t bUnmapInfo;
     5742    void   *pvDesc;
     5743    rcStrict = iemMemMap(pVCpu, &pvDesc, &bUnmapInfo, 8, UINT8_MAX,
     5744                         pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW, 0);
    57365745    if (rcStrict != VINF_SUCCESS)
    57375746        return rcStrict;
     
    57435752        case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 -  8); break;
    57445753    }
    5745     rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);
     5754    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    57465755    if (rcStrict != VINF_SUCCESS)
    57475756        return rcStrict;
     
    81618170                            if (cbToMap > 512)
    81628171                                cbToMap = 512;
     8172                            uint8_t      bUnmapInfo;
    81638173                            void        *pvSrc    = NULL;
    8164                             VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
     8174                            VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, &bUnmapInfo, cbToMap,
     8175                                                              UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
    81658176                            if (rcStrict == VINF_SUCCESS)
    81668177                            {
    81678178                                RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
    8168                                 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);
     8179                                rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    81698180                                AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    81708181                            }
     
    86548665 * Implements 'CMPXCHG16B' fallback using rendezvous.
    86558666 */
    8656 IEM_CIMPL_DEF_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
    8657                 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags)
     8667IEM_CIMPL_DEF_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
     8668                PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo)
    86588669{
    86598670# ifdef IN_RING3
     
    86728683    {
    86738684        /* Duplicated tail code. */
    8674         rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);
     8685        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    86758686        if (rcStrict == VINF_SUCCESS)
    86768687        {
     
    86868697    return rcStrict;
    86878698# else
    8688     RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     8699    RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags, bUnmapInfo);
    86898700    return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now.  Should perhaps be replaced later. */
    86908701# endif
     
    88188829     * Access the memory.
    88198830     */
    8820     void *pvMem512;
    8821     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
     8831    uint8_t bUnmapInfo;
     8832    void   *pvMem512;
     8833    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512,
     8834                                      iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
    88228835                                      15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    88238836    if (rcStrict != VINF_SUCCESS)
     
    88818894     * Commit the memory.
    88828895     */
    8883     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     8896    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    88848897    if (rcStrict != VINF_SUCCESS)
    88858898        return rcStrict;
     
    89118924     * Access the memory.
    89128925     */
    8913     void *pvMem512;
    8914     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
     8926    uint8_t bUnmapInfo;
     8927    void   *pvMem512;
     8928    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
    89158929                                      15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    89168930    if (rcStrict != VINF_SUCCESS)
     
    89989012     * Unmap the memory.
    89999013     */
    9000     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
     9014    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    90019015    if (rcStrict != VINF_SUCCESS)
    90029016        return rcStrict;
     
    90519065     */
    90529066    /* The x87+SSE state.  */
    9053     void *pvMem512;
    9054     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
     9067    uint8_t bUnmapInfoMem512;
     9068    void   *pvMem512;
     9069    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512,
     9070                                      iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
    90559071                                      63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    90569072    if (rcStrict != VINF_SUCCESS)
     
    90609076
    90619077    /* The header.  */
     9078    uint8_t      bUnmapInfoHdr;
    90629079    PX86XSAVEHDR pHdr;
    9063     rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
     9080    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, &bUnmapInfoHdr, sizeof(pHdr),
     9081                         iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
    90649082    if (rcStrict != VINF_SUCCESS)
    90659083        return rcStrict;
     
    91199137
    91209138    /* Commit the x87 state bits. (probably wrong) */
    9121     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9139    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
    91229140    if (rcStrict != VINF_SUCCESS)
    91239141        return rcStrict;
     
    91309148        /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
    91319149        AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
     9150        uint8_t         bUnmapInfoComp;
    91329151        PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
    91339152        PX86XSAVEYMMHI  pCompDst;
    9134         rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
     9153        rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, &bUnmapInfoComp, sizeof(*pCompDst), iEffSeg,
     9154                             GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
    91359155                             IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
    91369156        if (rcStrict != VINF_SUCCESS)
     
    91419161            pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
    91429162
    9143         rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9163        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
    91449164        if (rcStrict != VINF_SUCCESS)
    91459165            return rcStrict;
     
    91529172                   | (fReqComponents & fXInUse);
    91539173
    9154     rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
     9174    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
    91559175    if (rcStrict != VINF_SUCCESS)
    91569176        return rcStrict;
     
    92079227     */
    92089228    /* The x87+SSE state.  */
    9209     void *pvMem512;
    9210     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
     9229    uint8_t bUnmapInfoMem512;
     9230    void   *pvMem512;
     9231    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
    92119232                                      63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
    92129233    if (rcStrict != VINF_SUCCESS)
     
    92189239     * Calc the requested mask
    92199240     */
     9241    uint8_t       bUnmapInfoHdr;
    92209242    PX86XSAVEHDR  pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
    92219243    PCX86XSAVEHDR pHdrSrc;
    9222     rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512,
     9244    rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, &bUnmapInfoHdr, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512,
    92239245                         IEM_ACCESS_DATA_R, 0 /* checked above */);
    92249246    if (rcStrict != VINF_SUCCESS)
     
    92369258
    92379259    /* We won't need this any longer. */
    9238     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
     9260    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
    92399261    if (rcStrict != VINF_SUCCESS)
    92409262        return rcStrict;
     
    93359357
    93369358    /* Unmap the x87 state bits (so we've don't run out of mapping). */
    9337     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
     9359    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
    93389360    if (rcStrict != VINF_SUCCESS)
    93399361        return rcStrict;
     
    93509372        {
    93519373            /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
     9374            uint8_t         bUnmapInfoComp;
    93529375            PCX86XSAVEYMMHI pCompSrc;
    9353             rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
     9376            rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, &bUnmapInfoComp, sizeof(*pCompDst),
    93549377                                 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
    93559378                                 IEM_ACCESS_DATA_R, 0 /* checked above */);
     
    93639386            }
    93649387
    9365             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
     9388            rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
    93669389            if (rcStrict != VINF_SUCCESS)
    93679390                return rcStrict;
     
    96529675IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
    96539676{
     9677    uint8_t      bUnmapInfo;
    96549678    RTPTRUNION   uPtr;
    9655     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
     9679    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
    96569680                                      iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
    96579681                                      enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
     
    96619685    iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
    96629686
    9663     rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9687    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    96649688    if (rcStrict != VINF_SUCCESS)
    96659689        return rcStrict;
     
    96969720    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
    96979721
     9722    uint8_t      bUnmapInfo;
    96989723    RTPTRUNION   uPtr;
    9699     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
     9724    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
    97009725                                      iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
    97019726    if (rcStrict != VINF_SUCCESS)
     
    97129737    }
    97139738
    9714     rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
     9739    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    97159740    if (rcStrict != VINF_SUCCESS)
    97169741        return rcStrict;
     
    97489773IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
    97499774{
     9775    uint8_t      bUnmapInfo;
    97509776    RTCPTRUNION  uPtr;
    9751     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
     9777    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
    97529778                                      iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
    97539779                                      enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
     
    97579783    iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
    97589784
    9759     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
     9785    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    97609786    if (rcStrict != VINF_SUCCESS)
    97619787        return rcStrict;
     
    97759801IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
    97769802{
     9803    uint8_t      bUnmapInfo;
    97779804    RTCPTRUNION  uPtr;
    9778     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
     9805    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
    97799806                                      iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
    97809807    if (rcStrict != VINF_SUCCESS)
     
    97929819    }
    97939820
    9794     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
     9821    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    97959822    if (rcStrict != VINF_SUCCESS)
    97969823        return rcStrict;
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h

    r100804 r102430  
    12121212#endif
    12131213
     1214    uint8_t         bUnmapInfo;
    12141215    OP_TYPE        *puMem;
    1215     rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
     1216    rcStrict = iemMemMap(pVCpu, (void **)&puMem, &bUnmapInfo, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
    12161217                         IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
    12171218    if (rcStrict != VINF_SUCCESS)
     
    12271228        *puMem = (OP_TYPE)u32Value;
    12281229# ifdef IN_RING3
    1229         VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
     1230        VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    12301231# else
    1231         VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
     1232        VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
    12321233# endif
    12331234        if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
     
    14171418        do
    14181419        {
     1420            uint8_t  bUnmapInfo;
    14191421            OP_TYPE *puMem;
    1420             rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
     1422            rcStrict = iemMemMap(pVCpu, (void **)&puMem, &bUnmapInfo, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
    14211423                                 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
    14221424            if (rcStrict != VINF_SUCCESS)
     
    14331435            *puMem = (OP_TYPE)u32Value;
    14341436# ifdef IN_RING3
    1435             VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
     1437            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    14361438# else
    1437             VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
     1439            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
    14381440# endif
    14391441            if (rcStrict2 == VINF_SUCCESS)
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h

    r102428 r102430  
    1248112481         * the patterns IEMAllThrdPython.py requires for the code morphing.
    1248212482         */
    12483 #define BODY_CMPXCHG16B_HEAD \
    12484             IEM_MC_BEGIN(4, 4, IEM_MC_F_64BIT, 0); \
     12483#define BODY_CMPXCHG16B_HEAD(bUnmapInfoStmt) \
     12484            IEM_MC_BEGIN(5, 4, IEM_MC_F_64BIT, 0); \
    1248512485            IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst); \
    1248612486            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
     
    1248812488            \
    1248912489            IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16); \
    12490             IEM_MC_LOCAL(uint8_t,               bUnmapInfo); \
     12490            bUnmapInfoStmt; \
    1249112491            IEM_MC_ARG(PRTUINT128U,             pu128MemDst,                0); \
    1249212492            IEM_MC_MEM_MAP_U128_RW(pu128MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
     
    1251812518                && (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
    1251912519            {
    12520                 BODY_CMPXCHG16B_HEAD;
     12520                BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1252112521                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1252212522                BODY_CMPXCHG16B_TAIL;
     
    1252412524            else
    1252512525            {
    12526                 BODY_CMPXCHG16B_HEAD;
     12526                BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1252712527                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1252812528                BODY_CMPXCHG16B_TAIL;
     
    1253312533            if (pVCpu->CTX_SUFF(pVM)->cCpus == 1)
    1253412534            {
    12535                 BODY_CMPXCHG16B_HEAD;
     12535                BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1253612536                IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1253712537                BODY_CMPXCHG16B_TAIL;
     
    1253912539            else
    1254012540            {
    12541                 BODY_CMPXCHG16B_HEAD;
    12542                 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS,
     12541                BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4));
     12542                IEM_MC_CALL_CIMPL_5(IEM_CIMPL_F_STATUS_FLAGS,
    1254312543                                      RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX)
    1254412544                                    | RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xDX),
    12545                                     iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
     12545                                    iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx,
     12546                                                                             pEFlags, bUnmapInfo);
    1254612547                IEM_MC_END();
    1254712548            }
     
    1255212553        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))
    1255312554        {
    12554             BODY_CMPXCHG16B_HEAD;
     12555            BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1255512556            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1255612557            BODY_CMPXCHG16B_TAIL;
     
    1255812559        else
    1255912560        {
    12560             BODY_CMPXCHG16B_HEAD;
     12561            BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1256112562            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1256212563            BODY_CMPXCHG16B_TAIL;
     
    1257012571        if (pVCpu->CTX_SUFF(pVM)->cCpus == 1)
    1257112572        {
    12572             BODY_CMPXCHG16B_HEAD;
     12573            BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo));
    1257312574            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags);
    1257412575            BODY_CMPXCHG16B_TAIL;
     
    1257612577        else
    1257712578        {
    12578             BODY_CMPXCHG16B_HEAD;
     12579            BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4));
    1257912580            IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS,
    1258012581                                  RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX)
  • trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h

    r102427 r102430  
    5555{
    5656    /* The lazy approach for now... */
     57    uint8_t              bUnmapInfo;
    5758    TMPL_MEM_TYPE const *puSrc;
    58     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(*puSrc), iSegReg, GCPtrMem,
     59    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
    5960                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    6061    if (rc == VINF_SUCCESS)
    6162    {
    6263        *puDst = *puSrc;
    63         rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
     64        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    6465        Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
    6566    }
     
    7980    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
    8081#  endif
    81     TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*pSrc), iSegReg, GCPtrMem,
     82    uint8_t              bUnmapInfo;
     83    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
    8284                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    8385    *pDst = *pSrc;
    84     iemMemCommitAndUnmapJmp(pVCpu, (void *)pSrc, IEM_ACCESS_DATA_R);
     86    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    8587    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
    8688}
     
    9294    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
    9395#  endif
    94     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*puSrc), iSegReg, GCPtrMem,
     96    uint8_t              bUnmapInfo;
     97    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
    9598                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    9699    TMPL_MEM_TYPE const  uRet = *puSrc;
    97     iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
     100    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    98101    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    99102    return uRet;
     
    118121{
    119122    /* The lazy approach for now... */
     123    uint8_t        bUnmapInfo;
    120124    TMPL_MEM_TYPE *puDst;
    121     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(*puDst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
     125    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
     126                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    122127    if (rc == VINF_SUCCESS)
    123128    {
     
    127132        *puDst = uValue;
    128133#endif
    129         rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_DATA_W);
     134        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    130135#ifdef TMPL_MEM_BY_REF
    131136        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
     
    163168    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
    164169#endif
    165     TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(*puDst), iSegReg, GCPtrMem,
     170    uint8_t        bUnmapInfo;
     171    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
    166172                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    167173#ifdef TMPL_MEM_BY_REF
     
    170176    *puDst = uValue;
    171177#endif
    172     iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_DATA_W);
     178    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    173179}
    174180#endif /* IEM_WITH_SETJMP */
     
    196202    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    197203    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
    198     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
     204    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     205                                         IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
    199206}
    200207
     
    219226    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    220227    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
    221     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
     228    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     229                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    222230}
    223231
     
    242250    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    243251    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
    244     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
     252    return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
     253                                         IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    245254}
    246255
     
    265274
    266275    /* Write the dword the lazy way. */
     276    uint8_t        bUnmapInfo;
    267277    TMPL_MEM_TYPE *puDst;
    268     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    269                                 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
     278    VBOXSTRICTRC   rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     279                                  IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    270280    if (rc == VINF_SUCCESS)
    271281    {
    272282        *puDst = uValue;
    273         rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);
     283        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    274284
    275285        /* Commit the new RSP value unless we an access handler made trouble. */
     
    302312
    303313    /* Write the word the lazy way. */
     314    uint8_t              bUnmapInfo;
    304315    TMPL_MEM_TYPE const *puSrc;
    305     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     316    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    306317                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    307318    if (rc == VINF_SUCCESS)
    308319    {
    309320        *puValue = *puSrc;
    310         rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);
     321        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    311322
    312323        /* Commit the new RSP value. */
     
    339350
    340351    /* Write the word the lazy way. */
     352    uint8_t        bUnmapInfo;
    341353    TMPL_MEM_TYPE *puDst;
    342     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    343                                 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
     354    VBOXSTRICTRC   rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     355                                  IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    344356    if (rc == VINF_SUCCESS)
    345357    {
    346358        *puDst = uValue;
    347         rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);
     359        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    348360
    349361        /* Commit the new RSP value unless we an access handler made trouble. */
     
    377389
    378390    /* Write the word the lazy way. */
     391    uint8_t              bUnmapInfo;
    379392    TMPL_MEM_TYPE const *puSrc;
    380     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     393    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    381394                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    382395    if (rc == VINF_SUCCESS)
    383396    {
    384397        *puValue = *puSrc;
    385         rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);
     398        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    386399
    387400        /* Commit the new RSP value. */
     
    414427
    415428    /* Write the data. */
    416     TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
     429    uint8_t        bUnmapInfo;
     430    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    417431                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    418432    *puDst = uValue;
    419     iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);
     433    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    420434
    421435    /* Commit the RSP change. */
     
    440454
    441455    /* Read the data. */
    442     TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
    443                                                                      IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
     456    uint8_t              bUnmapInfo;
     457    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
     458                                                                     GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    444459    TMPL_MEM_TYPE const  uRet = *puSrc;
    445     iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);
     460    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    446461
    447462    /* Commit the RSP change and return the popped value. */
     
    478493     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
    479494     * ancient hardware when it actually did change. */
    480     uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
     495    uint8_t   bUnmapInfo;
     496    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
    481497                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    482498    *puDst = (uint16_t)uValue;
    483     iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);
     499    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    484500
    485501    /* Commit the RSP change. */
  • trunk/src/VBox/VMM/include/IEMInline.h

    r102428 r102430  
    37153715#ifdef IEM_WITH_SETJMP
    37163716
    3717 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     3717DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    37183718{
    37193719# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     
    37213721        return;
    37223722# endif
    3723     iemMemCommitAndUnmapRwSafeJmp(pVCpu, pvMem, bMapInfo);
    3724 }
    3725 
    3726 
    3727 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     3723    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
     3724}
     3725
     3726
     3727DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    37283728{
    37293729# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     
    37313731        return;
    37323732# endif
    3733     iemMemCommitAndUnmapWoSafeJmp(pVCpu, pvMem, bMapInfo);
    3734 }
    3735 
    3736 
    3737 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
     3733    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
     3734}
     3735
     3736
     3737DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    37383738{
    37393739# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     
    37413741        return;
    37423742# endif
    3743     iemMemCommitAndUnmapRoSafeJmp(pVCpu, pvMem, bMapInfo);
    3744 }
    3745 
    3746 DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT
     3743    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
     3744}
     3745
     3746DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
    37473747{
    37483748# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     
    37503750        return;
    37513751# endif
    3752     iemMemRollbackAndUnmapWoSafe(pVCpu, pvMem, bMapInfo);
     3752    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
    37533753}
    37543754
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r102428 r102430  
    50395039#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
    50405040
    5041 VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
     5041VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
    50425042                          uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
    5043 VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
     5043VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    50445044#ifndef IN_RING3
    5045 VBOXSTRICTRC    iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
    5046 #endif
    5047 void            iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
     5045VBOXSTRICTRC    iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
     5046#endif
     5047void            iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    50485048void            iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
    50495049VBOXSTRICTRC    iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
     
    51815181PCRTUINT128U    iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    51825182
    5183 void            iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    5184 void            iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    5185 void            iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    5186 void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT;
     5183void            iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     5184void            iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     5185void            iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     5186void            iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     5187void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    51875188#endif
    51885189
    51895190VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    5190                                             void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
    5191 VBOXSTRICTRC    iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
     5191                                            void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
     5192VBOXSTRICTRC    iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
    51925193VBOXSTRICTRC    iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
    51935194VBOXSTRICTRC    iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
     
    51985199VBOXSTRICTRC    iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
    51995200VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    5200                                            void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
     5201                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
    52015202VBOXSTRICTRC    iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
    5202                                               void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
    5203 VBOXSTRICTRC    iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
     5203                                              void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
     5204VBOXSTRICTRC    iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    52045205VBOXSTRICTRC    iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
    52055206VBOXSTRICTRC    iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
     
    53345335IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
    53355336IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
    5336 IEM_CIMPL_PROTO_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
    5337                   PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags);
     5337IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
     5338                  PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
    53385339IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    53395340IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
  • trunk/src/VBox/VMM/include/IEMMc.h

    r102429 r102430  
    15191519 */
    15201520#ifndef IEM_WITH_SETJMP
    1521 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1522         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
    1523                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \
    1524         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1525     } while (0)
     1521# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1522    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
     1523                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
    15261524#else
    15271525# define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    15401538 */
    15411539#ifndef IEM_WITH_SETJMP
    1542 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1543         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
    1544                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \
    1545         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1546     } while (0)
     1540# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1541    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
     1542                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
    15471543#else
    15481544# define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    15611557 */
    15621558#ifndef IEM_WITH_SETJMP
    1563 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1564         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \
    1565                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \
    1566         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1567     } while (0)
     1559# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1560    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
     1561                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
    15681562#else
    15691563# define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    15821576 */
    15831577#ifndef IEM_WITH_SETJMP
    1584 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1585         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
    1586                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \
    1587         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1588     } while (0)
     1578# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1579    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
     1580                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
    15891581#else
    15901582# define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    16031595 */
    16041596#ifndef IEM_WITH_SETJMP
    1605 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1606         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
    1607                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \
    1608         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1609     } while (0)
     1597# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1598    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
     1599                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
    16101600#else
    16111601# define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    16241614 */
    16251615#ifndef IEM_WITH_SETJMP
    1626 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1627         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \
    1628                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \
    1629         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1630     } while (0)
     1616# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1617    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
     1618                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
    16311619#else
    16321620# define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    16481636 */
    16491637#ifndef IEM_WITH_SETJMP
    1650 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1651         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
    1652                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \
    1653         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1654     } while (0)
     1638# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1639    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
     1640                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
    16551641#else
    16561642# define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    16691655 */
    16701656#ifndef IEM_WITH_SETJMP
    1671 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1672         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
    1673                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \
    1674         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1675     } while (0)
     1657# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1658    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
     1659                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
    16761660#else
    16771661# define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    16901674 */
    16911675#ifndef IEM_WITH_SETJMP
    1692 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1693         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \
    1694                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \
    1695         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1696     } while (0)
     1676# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1677    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
     1678                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
    16971679#else
    16981680# define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    17111693 */
    17121694#ifndef IEM_WITH_SETJMP
    1713 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1714         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
    1715                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \
    1716         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1717     } while (0)
     1695# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1696    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
     1697                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
    17181698#else
    17191699# define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    17321712 */
    17331713#ifndef IEM_WITH_SETJMP
    1734 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1735         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
    1736                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \
    1737         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1738     } while (0)
     1714# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1715    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
     1716                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
    17391717#else
    17401718# define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    17531731 */
    17541732#ifndef IEM_WITH_SETJMP
    1755 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1756         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \
    1757                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \
    1758         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1759     } while (0)
     1733# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1734   IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
     1735                                      (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
    17601736#else
    17611737# define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    17951771 */
    17961772#ifndef IEM_WITH_SETJMP
    1797 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1798         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
    1799                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \
    1800         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1801     } while (0)
     1773# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1774    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
     1775                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
    18021776#else
    18031777# define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    18161790 */
    18171791#ifndef IEM_WITH_SETJMP
    1818 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1819         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
    1820                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \
    1821         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1822     } while (0)
     1792# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1793    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
     1794                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
    18231795#else
    18241796# define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    18371809 */
    18381810#ifndef IEM_WITH_SETJMP
    1839 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1840         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \
    1841                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \
    1842         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1843     } while (0)
     1811# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1812    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
     1813                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
    18441814#else
    18451815# define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    18581828 */
    18591829#ifndef IEM_WITH_SETJMP
    1860 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1861         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
    1862                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \
    1863         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1864     } while (0)
     1830# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1831    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
     1832                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
    18651833#else
    18661834# define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    18791847 */
    18801848#ifndef IEM_WITH_SETJMP
    1881 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1882         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
    1883                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \
    1884         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1885     } while (0)
     1849# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1850    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
     1851                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
    18861852#else
    18871853# define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    19001866 */
    19011867#ifndef IEM_WITH_SETJMP
    1902 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    1903         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \
    1904                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \
    1905         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    1906     } while (0)
     1868# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1869    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
     1870                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
    19071871#else
    19081872# define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    19601924 */
    19611925#ifndef IEM_WITH_SETJMP
    1962 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1963         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
    1964                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \
    1965         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    1966     } while (0)
     1926# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1927    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
     1928                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
    19671929#else
    19681930# define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    19811943 */
    19821944#ifndef IEM_WITH_SETJMP
    1983 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    1984         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
    1985                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    1986         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    1987     } while (0)
     1945# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1946    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
     1947                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    19881948#else
    19891949# define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    20021962 */
    20031963#ifndef IEM_WITH_SETJMP
    2004 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2005         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \
    2006                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \
    2007         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    2008     } while (0)
     1964# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1965    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
     1966                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
    20091967#else
    20101968# define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    20231981 */
    20241982#ifndef IEM_WITH_SETJMP
    2025 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2026         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
    2027                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \
    2028         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    2029     } while (0)
     1983# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1984    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
     1985                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
    20301986#else
    20311987# define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    20442000 */
    20452001#ifndef IEM_WITH_SETJMP
    2046 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2047         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
    2048                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    2049         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2050     } while (0)
     2002# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     2003    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
     2004                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    20512005#else
    20522006# define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    20652019 */
    20662020#ifndef IEM_WITH_SETJMP
    2067 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2068         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \
    2069                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \
    2070         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    2071     } while (0)
     2021# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     2022    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
     2023                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
    20722024#else
    20732025# define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    21252077 */
    21262078#ifndef IEM_WITH_SETJMP
    2127 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2128         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128U), (a_iSeg), \
    2129                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1)); \
    2130         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    2131     } while (0)
     2079# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2080    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
     2081                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
    21322082#else
    21332083# define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    21462096 */
    21472097#ifndef IEM_WITH_SETJMP
    2148 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2149         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), (a_iSeg), \
    2150                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)); \
    2151         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2152     } while (0)
     2098# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2099    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \
     2100                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1))
    21532101#else
    21542102# define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    21672115 */
    21682116#ifndef IEM_WITH_SETJMP
    2169 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2170         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), (a_iSeg), \
    2171                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)); \
    2172         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    2173     } while (0)
     2117# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2118    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \
     2119                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1))
    21742120#else
    21752121# define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    21882134 */
    21892135#ifndef IEM_WITH_SETJMP
    2190 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2191         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \
    2192                                            (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1)); \
    2193         a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \
    2194     } while (0)
     2136# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     2137    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
     2138                                       (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1))
    21952139#else
    21962140# define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    22092153 */
    22102154#ifndef IEM_WITH_SETJMP
    2211 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2212         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \
    2213                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)); \
    2214         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2215     } while (0)
     2155# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     2156    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
     2157                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1))
    22162158#else
    22172159# define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    22302172 */
    22312173#ifndef IEM_WITH_SETJMP
    2232 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2233         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \
    2234                                            (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)); \
    2235         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \
    2236     } while (0)
     2174# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     2175    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
     2176                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1))
    22372177#else
    22382178# define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    22542194 */
    22552195#ifndef IEM_WITH_SETJMP
    2256 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2257         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), (a_iSeg), \
    2258                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    2259         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2260     } while (0)
     2196# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2197    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
     2198                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    22612199#else
    22622200# define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    22742212 */
    22752213#ifndef IEM_WITH_SETJMP
    2276 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2277         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \
    2278                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    2279         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2280     } while (0)
     2214# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
     2215    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
     2216                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    22812217#else
    22822218# define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    22962232 */
    22972233#ifndef IEM_WITH_SETJMP
    2298 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \
    2299         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), sizeof(RTFLOAT80U), (a_iSeg), \
    2300                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    2301         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2302     } while (0)
     2234# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2235    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
     2236                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    23032237#else
    23042238# define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     
    23162250 */
    23172251#ifndef IEM_WITH_SETJMP
    2318 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) do { \
    2319         IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \
    2320                                            (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \
    2321         a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \
    2322     } while (0)
     2252# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
     2253    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
     2254                                       (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    23232255#else
    23242256# define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
     
    23342266 */
    23352267#ifndef IEM_WITH_SETJMP
    2336 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) do { \
    2337         RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); \
    2338         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_RW)); \
    2339     } while (0)
    2340 #else
    2341 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) \
    2342     iemMemCommitAndUnmapRwJmp(pVCpu, (a_pvMem), (a_bMapInfo))
     2268# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo)    IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
     2269#else
     2270# define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo)    iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    23432271#endif
    23442272
     
    23472275 */
    23482276#ifndef IEM_WITH_SETJMP
    2349 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \
    2350         RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
    2351         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \
    2352     } while (0)
    2353 #else
    2354 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \
    2355     iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), (a_bMapInfo))
     2277# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo)    IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
     2278#else
     2279# define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo)    iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
    23562280#endif
    23572281
     
    23602284 */
    23612285#ifndef IEM_WITH_SETJMP
    2362 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) do { \
    2363         RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); \
    2364         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (void *)(a_pvMem), IEM_ACCESS_DATA_R)); \
    2365     } while (0)
    2366 #else
    2367 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) \
    2368     iemMemCommitAndUnmapRoJmp(pVCpu, (a_pvMem), (a_bMapInfo))
     2286# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo)    IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
     2287#else
     2288# define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo)    iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
    23692289#endif
    23702290
     
    23782298 *
    23792299 * @remarks     May in theory return - for now.
    2380  *
    2381  * @deprecated
    2382  */
    2383 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
    2384     do { \
    2385         if (   !(a_u16FSW & X86_FSW_ES) \
    2386             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
    2387                  & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
    2388             IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
    2389     } while (0)
    2390 
    2391 
    2392 /** Commits the memory and unmaps the guest memory unless the FPU status word
    2393  * indicates (@a a_u16FSW) and FPU control word indicates a pending exception
    2394  * that would cause FLD not to store.
    2395  *
    2396  * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
    2397  * store, while \#P will not.
    2398  *
    2399  * @remarks     May in theory return - for now.
    2400  */
    2401 #ifndef IEM_WITH_SETJMP
    2402 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
    2403         RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
    2404         if (   !(a_u16FSW & X86_FSW_ES) \
    2405             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
    2406                  & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
    2407             IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \
    2408         else \
    2409             iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
    2410     } while (0)
    2411 #else
     2300 */
     2301#ifndef IEM_WITH_SETJMP
    24122302# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
    24132303        if (   !(a_u16FSW & X86_FSW_ES) \
    24142304            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
    24152305                 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
    2416             iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), a_bMapInfo); \
     2306            IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \
    24172307        else \
    2418             iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo); \
     2308            iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
     2309    } while (0)
     2310#else
     2311# define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \
     2312        if (   !(a_u16FSW & X86_FSW_ES) \
     2313            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
     2314                 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
     2315            iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
     2316        else \
     2317            iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
    24192318    } while (0)
    24202319#endif
     
    24222321/** Rolls back (conceptually only, assumes no writes) and unmaps the guest  memory. */
    24232322#ifndef IEM_WITH_SETJMP
    2424 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \
    2425         RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \
    2426         iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
    2427     } while (0)
    2428 #else
    24292323# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \
    2430         iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo)
     2324        iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
     2325#else
     2326# define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \
     2327        iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
    24312328#endif
    24322329
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette