Changeset 102430 in vbox
- Timestamp: Dec 2, 2023 2:39:20 AM (14 months ago)
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r102428 r102430 2106 2106 * Push the stack frame. 2107 2107 */ 2108 uint8_t bUnmapInfo; 2108 2109 uint16_t *pu16Frame; 2109 2110 uint64_t uNewRsp; 2110 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, & uNewRsp);2111 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp); 2111 2112 if (rcStrict != VINF_SUCCESS) 2112 2113 return rcStrict; … … 2121 2122 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel; 2122 2123 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip; 2123 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);2124 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); 2124 2125 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS)) 2125 2126 return rcStrict; … … 2311 2312 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set. 2312 2313 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set. 2313 * @param SelT SSThe TSS selector of the new task.2314 * @param pNewDescT SSPointer to the new TSS descriptor.2314 * @param SelTss The TSS selector of the new task. 2315 * @param pNewDescTss Pointer to the new TSS descriptor. 2315 2316 */ 2316 2317 VBOXSTRICTRC … … 2321 2322 uint16_t uErr, 2322 2323 uint64_t uCr2, 2323 RTSEL SelT SS,2324 PIEMSELDESC pNewDescT SS) RT_NOEXCEPT2324 RTSEL SelTss, 2325 PIEMSELDESC pNewDescTss) RT_NOEXCEPT 2325 2326 { 2326 2327 Assert(!IEM_IS_REAL_MODE(pVCpu)); … … 2328 2329 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK); 2329 2330 2330 uint32_t const uNewT SSType = pNewDescTSS->Legacy.Gate.u4Type;2331 Assert( uNewT SSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL2332 || uNewT SSType == X86_SEL_TYPE_SYS_286_TSS_BUSY2333 || uNewT SSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2334 || uNewT SSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2335 2336 bool const fIsNewT SS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2337 || uNewT SSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2338 2339 Log(("iemTaskSwitch: enmTaskSwitch=%u NewT SS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,2340 fIsNewT SS386, pVCpu->cpum.GstCtx.eip, uNextEip));2331 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type; 2332 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL 2333 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY 2334 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2335 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2336 2337 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL 2338 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2339 2340 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss, 2341 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip)); 2341 2342 2342 2343 /* Update CR2 in case it's a page-fault. */ … … 2350 2351 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)". 2351 2352 */ 2352 uint32_t const uNewT SSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);2353 uint32_t const uNewT SSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;2354 if (uNewT SSLimit < uNewTSSLimitMin)2355 { 2356 Log(("iemTaskSwitch: Invalid new TSS limit. 
enmTaskSwitch=%u uNewT SSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",2357 enmTaskSwitch, uNewT SSLimit, uNewTSSLimitMin));2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelT SS& X86_SEL_MASK_OFF_RPL);2353 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16); 2354 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN; 2355 if (uNewTssLimit < uNewTssLimitMin) 2356 { 2357 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n", 2358 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin)); 2359 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL); 2359 2360 } 2360 2361 … … 2368 2369 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu)) 2369 2370 { 2370 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelT SS));2371 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelT SS, uNextEip - pVCpu->cpum.GstCtx.eip);2371 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss)); 2372 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip); 2372 2373 } 2373 2374 … … 2378 2379 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH)) 2379 2380 { 2380 uint32_t const uExitInfo1 = SelT SS;2381 uint32_t const uExitInfo1 = SelTss; 2381 2382 uint32_t uExitInfo2 = uErr; 2382 2383 switch (enmTaskSwitch) … … 2404 2405 * end up with smaller than "legal" TSS limits. 2405 2406 */ 2406 uint32_t const uCurT SSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;2407 uint32_t const uCurT SSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;2408 if (uCurT SSLimit < uCurTSSLimitMin)2409 { 2410 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurT SSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",2411 enmTaskSwitch, uCurT SSLimit, uCurTSSLimitMin));2412 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelT SS& X86_SEL_MASK_OFF_RPL);2407 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit; 2408 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29; 2409 if (uCurTssLimit < uCurTssLimitMin) 2410 { 2411 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n", 2412 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin)); 2413 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL); 2413 2414 } 2414 2415 … … 2417 2418 * and not the entire TSS. 2418 2419 */ 2419 void *pvNewTSS; 2420 uint32_t const cbNewTSS = uNewTSSLimitMin + 1; 2421 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy); 2420 uint8_t bUnmapInfoNewTss; 2421 void *pvNewTss; 2422 uint32_t const cbNewTss = uNewTssLimitMin + 1; 2423 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy); 2422 2424 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1); 2423 2425 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may 2424 2426 * not perform correct translation if this happens. See Intel spec. 7.2.1 2425 2427 * "Task-State Segment". */ 2426 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0); 2428 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0); 2429 /** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here. 2430 * Consider wrapping the remainder into a function for simpler cleanup. 
*/ 2427 2431 if (rcStrict != VINF_SUCCESS) 2428 2432 { 2429 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewT SS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,2430 cbNewT SS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));2433 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch, 2434 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict))); 2431 2435 return rcStrict; 2432 2436 } … … 2439 2443 || enmTaskSwitch == IEMTASKSWITCH_IRET) 2440 2444 { 2441 PX86DESC pDescCurTSS; 2442 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX, 2445 uint8_t bUnmapInfoDescCurTss; 2446 PX86DESC pDescCurTss; 2447 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX, 2443 2448 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0); 2444 2449 if (rcStrict != VINF_SUCCESS) … … 2449 2454 } 2450 2455 2451 pDescCurT SS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;2452 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);2456 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 2457 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss); 2453 2458 if (rcStrict != VINF_SUCCESS) 2454 2459 { … … 2461 2466 if (enmTaskSwitch == IEMTASKSWITCH_IRET) 2462 2467 { 2463 Assert( uNewT SSType == X86_SEL_TYPE_SYS_286_TSS_BUSY2464 || uNewT SSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2468 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY 2469 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY); 2465 2470 fEFlags &= ~X86_EFL_NT; 2466 2471 } … … 2470 2475 * Save the CPU state into the current TSS. 2471 2476 */ 2472 RTGCPTR const GCPtrCurT SS= pVCpu->cpum.GstCtx.tr.u64Base;2473 if (GCPtrNewT SS == GCPtrCurTSS)2474 { 2475 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurT SS));2477 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base; 2478 if (GCPtrNewTss == GCPtrCurTss) 2479 { 2480 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss)); 2476 2481 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n", 2477 2482 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax, … … 2479 2484 pVCpu->cpum.GstCtx.ldtr.Sel)); 2480 2485 } 2481 if (fIsNewT SS386)2486 if (fIsNewTss386) 2482 2487 { 2483 2488 /* … … 2485 2490 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields. 2486 2491 */ 2487 void *pvCurTSS32; 2488 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip); 2489 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip); 2492 uint8_t bUnmapInfoCurTss32; 2493 void *pvCurTss32; 2494 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip); 2495 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip); 2490 2496 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64); 2491 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0); 2497 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX, 2498 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0); 2492 2499 if (rcStrict != VINF_SUCCESS) 2493 2500 { 2494 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. 
enmTaskSwitch=%u GCPtrCurT SS=%#RGv cb=%u rc=%Rrc\n",2495 enmTaskSwitch, GCPtrCurT SS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));2501 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n", 2502 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict))); 2496 2503 return rcStrict; 2497 2504 } 2498 2505 2499 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurT SS..cbCurTSS). */2500 PX86TSS32 pCurT SS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);2501 pCurT SS32->eip = uNextEip;2502 pCurT SS32->eflags = fEFlags;2503 pCurT SS32->eax = pVCpu->cpum.GstCtx.eax;2504 pCurT SS32->ecx = pVCpu->cpum.GstCtx.ecx;2505 pCurT SS32->edx = pVCpu->cpum.GstCtx.edx;2506 pCurT SS32->ebx = pVCpu->cpum.GstCtx.ebx;2507 pCurT SS32->esp = pVCpu->cpum.GstCtx.esp;2508 pCurT SS32->ebp = pVCpu->cpum.GstCtx.ebp;2509 pCurT SS32->esi = pVCpu->cpum.GstCtx.esi;2510 pCurT SS32->edi = pVCpu->cpum.GstCtx.edi;2511 pCurT SS32->es = pVCpu->cpum.GstCtx.es.Sel;2512 pCurT SS32->cs = pVCpu->cpum.GstCtx.cs.Sel;2513 pCurT SS32->ss = pVCpu->cpum.GstCtx.ss.Sel;2514 pCurT SS32->ds = pVCpu->cpum.GstCtx.ds.Sel;2515 pCurT SS32->fs = pVCpu->cpum.GstCtx.fs.Sel;2516 pCurT SS32->gs = pVCpu->cpum.GstCtx.gs.Sel;2517 2518 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);2506 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */ 2507 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss); 2508 pCurTss32->eip = uNextEip; 2509 pCurTss32->eflags = fEFlags; 2510 pCurTss32->eax = pVCpu->cpum.GstCtx.eax; 2511 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx; 2512 pCurTss32->edx = pVCpu->cpum.GstCtx.edx; 2513 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx; 2514 pCurTss32->esp = pVCpu->cpum.GstCtx.esp; 2515 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp; 2516 pCurTss32->esi = pVCpu->cpum.GstCtx.esi; 2517 pCurTss32->edi = pVCpu->cpum.GstCtx.edi; 2518 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel; 2519 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel; 2520 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel; 2521 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel; 2522 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel; 2523 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel; 2524 2525 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32); 2519 2526 if (rcStrict != VINF_SUCCESS) 2520 2527 { … … 2529 2536 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size. 2530 2537 */ 2531 void *pvCurTSS16; 2532 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip); 2533 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip); 2538 uint8_t bUnmapInfoCurTss16; 2539 void *pvCurTss16; 2540 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip); 2541 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip); 2534 2542 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28); 2535 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0); 2543 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX, 2544 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0); 2536 2545 if (rcStrict != VINF_SUCCESS) 2537 2546 { 2538 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. 
enmTaskSwitch=%u GCPtrCurT SS=%#RGv cb=%u rc=%Rrc\n",2539 enmTaskSwitch, GCPtrCurT SS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));2547 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n", 2548 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict))); 2540 2549 return rcStrict; 2541 2550 } 2542 2551 2543 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurT SS..cbCurTSS). */2544 PX86TSS16 pCurT SS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);2545 pCurT SS16->ip = uNextEip;2546 pCurT SS16->flags = (uint16_t)fEFlags;2547 pCurT SS16->ax = pVCpu->cpum.GstCtx.ax;2548 pCurT SS16->cx = pVCpu->cpum.GstCtx.cx;2549 pCurT SS16->dx = pVCpu->cpum.GstCtx.dx;2550 pCurT SS16->bx = pVCpu->cpum.GstCtx.bx;2551 pCurT SS16->sp = pVCpu->cpum.GstCtx.sp;2552 pCurT SS16->bp = pVCpu->cpum.GstCtx.bp;2553 pCurT SS16->si = pVCpu->cpum.GstCtx.si;2554 pCurT SS16->di = pVCpu->cpum.GstCtx.di;2555 pCurT SS16->es = pVCpu->cpum.GstCtx.es.Sel;2556 pCurT SS16->cs = pVCpu->cpum.GstCtx.cs.Sel;2557 pCurT SS16->ss = pVCpu->cpum.GstCtx.ss.Sel;2558 pCurT SS16->ds = pVCpu->cpum.GstCtx.ds.Sel;2559 2560 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);2552 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */ 2553 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss); 2554 pCurTss16->ip = uNextEip; 2555 pCurTss16->flags = (uint16_t)fEFlags; 2556 pCurTss16->ax = pVCpu->cpum.GstCtx.ax; 2557 pCurTss16->cx = pVCpu->cpum.GstCtx.cx; 2558 pCurTss16->dx = pVCpu->cpum.GstCtx.dx; 2559 pCurTss16->bx = pVCpu->cpum.GstCtx.bx; 2560 pCurTss16->sp = pVCpu->cpum.GstCtx.sp; 2561 pCurTss16->bp = pVCpu->cpum.GstCtx.bp; 2562 pCurTss16->si = pVCpu->cpum.GstCtx.si; 2563 pCurTss16->di = pVCpu->cpum.GstCtx.di; 2564 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel; 2565 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel; 2566 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel; 2567 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel; 2568 2569 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16); 2561 2570 if (rcStrict != VINF_SUCCESS) 2562 2571 { … … 2574 2583 { 2575 2584 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */ 2576 PX86TSS32 pNewTSS = (PX86TSS32)pvNewT SS;2585 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss; 2577 2586 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel; 2578 2587 } … … 2585 2594 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt; 2586 2595 bool fNewDebugTrap; 2587 if (fIsNewT SS386)2588 { 2589 PCX86TSS32 pNewT SS32 = (PCX86TSS32)pvNewTSS;2590 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewT SS32->cr3 : 0;2591 uNewEip = pNewT SS32->eip;2592 uNewEflags = pNewT SS32->eflags;2593 uNewEax = pNewT SS32->eax;2594 uNewEcx = pNewT SS32->ecx;2595 uNewEdx = pNewT SS32->edx;2596 uNewEbx = pNewT SS32->ebx;2597 uNewEsp = pNewT SS32->esp;2598 uNewEbp = pNewT SS32->ebp;2599 uNewEsi = pNewT SS32->esi;2600 uNewEdi = pNewT SS32->edi;2601 uNewES = pNewT SS32->es;2602 uNewCS = pNewT SS32->cs;2603 uNewSS = pNewT SS32->ss;2604 uNewDS = pNewT SS32->ds;2605 uNewFS = pNewT SS32->fs;2606 uNewGS = pNewT SS32->gs;2607 uNewLdt = pNewT SS32->selLdt;2608 fNewDebugTrap = RT_BOOL(pNewT SS32->fDebugTrap);2596 if (fIsNewTss386) 2597 { 2598 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss; 2599 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? 
pNewTss32->cr3 : 0; 2600 uNewEip = pNewTss32->eip; 2601 uNewEflags = pNewTss32->eflags; 2602 uNewEax = pNewTss32->eax; 2603 uNewEcx = pNewTss32->ecx; 2604 uNewEdx = pNewTss32->edx; 2605 uNewEbx = pNewTss32->ebx; 2606 uNewEsp = pNewTss32->esp; 2607 uNewEbp = pNewTss32->ebp; 2608 uNewEsi = pNewTss32->esi; 2609 uNewEdi = pNewTss32->edi; 2610 uNewES = pNewTss32->es; 2611 uNewCS = pNewTss32->cs; 2612 uNewSS = pNewTss32->ss; 2613 uNewDS = pNewTss32->ds; 2614 uNewFS = pNewTss32->fs; 2615 uNewGS = pNewTss32->gs; 2616 uNewLdt = pNewTss32->selLdt; 2617 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap); 2609 2618 } 2610 2619 else 2611 2620 { 2612 PCX86TSS16 pNewT SS16 = (PCX86TSS16)pvNewTSS;2621 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss; 2613 2622 uNewCr3 = 0; 2614 uNewEip = pNewT SS16->ip;2615 uNewEflags = pNewT SS16->flags;2616 uNewEax = UINT32_C(0xffff0000) | pNewT SS16->ax;2617 uNewEcx = UINT32_C(0xffff0000) | pNewT SS16->cx;2618 uNewEdx = UINT32_C(0xffff0000) | pNewT SS16->dx;2619 uNewEbx = UINT32_C(0xffff0000) | pNewT SS16->bx;2620 uNewEsp = UINT32_C(0xffff0000) | pNewT SS16->sp;2621 uNewEbp = UINT32_C(0xffff0000) | pNewT SS16->bp;2622 uNewEsi = UINT32_C(0xffff0000) | pNewT SS16->si;2623 uNewEdi = UINT32_C(0xffff0000) | pNewT SS16->di;2624 uNewES = pNewT SS16->es;2625 uNewCS = pNewT SS16->cs;2626 uNewSS = pNewT SS16->ss;2627 uNewDS = pNewT SS16->ds;2623 uNewEip = pNewTss16->ip; 2624 uNewEflags = pNewTss16->flags; 2625 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax; 2626 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx; 2627 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx; 2628 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx; 2629 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp; 2630 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp; 2631 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si; 2632 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di; 2633 uNewES = pNewTss16->es; 2634 uNewCS = pNewTss16->cs; 2635 uNewSS = pNewTss16->ss; 2636 uNewDS = pNewTss16->ds; 2628 2637 uNewFS = 0; 2629 2638 uNewGS = 0; 2630 uNewLdt = pNewT SS16->selLdt;2639 uNewLdt = pNewTss16->selLdt; 2631 2640 fNewDebugTrap = false; 2632 2641 } 2633 2642 2634 if (GCPtrNewT SS == GCPtrCurTSS)2643 if (GCPtrNewTss == GCPtrCurTss) 2635 2644 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n", 2636 2645 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt)); … … 2639 2648 * We're done accessing the new TSS. 2640 2649 */ 2641 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);2650 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss); 2642 2651 if (rcStrict != VINF_SUCCESS) 2643 2652 { … … 2651 2660 if (enmTaskSwitch != IEMTASKSWITCH_IRET) 2652 2661 { 2653 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescT SS, sizeof(*pNewDescTSS), UINT8_MAX,2654 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelT SS& X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);2662 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX, 2663 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0); 2655 2664 if (rcStrict != VINF_SUCCESS) 2656 2665 { … … 2661 2670 2662 2671 /* Check that the descriptor indicates the new TSS is available (not busy). 
*/ 2663 AssertMsg( pNewDescT SS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL2664 || pNewDescT SS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,2665 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));2666 2667 pNewDescT SS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;2668 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);2672 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL 2673 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL, 2674 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type)); 2675 2676 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK; 2677 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss); 2669 2678 if (rcStrict != VINF_SUCCESS) 2670 2679 { … … 2679 2688 * until the completion of the task switch but before executing any instructions in the new task. 2680 2689 */ 2681 pVCpu->cpum.GstCtx.tr.Sel = SelT SS;2682 pVCpu->cpum.GstCtx.tr.ValidSel = SelT SS;2690 pVCpu->cpum.GstCtx.tr.Sel = SelTss; 2691 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss; 2683 2692 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID; 2684 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescT SS->Legacy);2685 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescT SS->Legacy);2686 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescT SS->Legacy);2693 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy); 2694 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy); 2695 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy); 2687 2696 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR); 2688 2697 … … 2758 2767 * Switch CR3 for the new task. 2759 2768 */ 2760 if ( fIsNewT SS3862769 if ( fIsNewTss386 2761 2770 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)) 2762 2771 { … … 3000 3009 3001 3010 /** @todo Debug trap. */ 3002 if (fIsNewT SS386 && fNewDebugTrap)3011 if (fIsNewTss386 && fNewDebugTrap) 3003 3012 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n")); 3004 3013 … … 3022 3031 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT); 3023 3032 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy); 3024 uint8_t const cbStackFrame = fIsNewT SS386 ? 4 : 2;3033 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2; 3025 3034 3026 3035 /* Check that there is sufficient space on the stack. */ … … 3050 3059 3051 3060 3052 if (fIsNewT SS386)3061 if (fIsNewTss386) 3053 3062 rcStrict = iemMemStackPushU32(pVCpu, uErr); 3054 3063 else … … 3057 3066 { 3058 3067 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", 3059 fIsNewT SS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));3068 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict))); 3060 3069 return rcStrict; 3061 3070 } … … 3201 3210 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 
0 : 1; 3202 3211 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL; 3203 RTSEL SelT SS= Idte.Gate.u16Sel;3212 RTSEL SelTss = Idte.Gate.u16Sel; 3204 3213 3205 3214 /* … … 3207 3216 */ 3208 3217 IEMSELDESC DescTSS; 3209 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelT SS, X86_XCPT_GP, (SelTSS& uSelMask) | uExt);3218 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt); 3210 3219 if (rcStrict != VINF_SUCCESS) 3211 3220 { 3212 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelT SS,3221 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss, 3213 3222 VBOXSTRICTRC_VAL(rcStrict))); 3214 3223 return rcStrict; … … 3221 3230 { 3222 3231 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n", 3223 u8Vector, SelT SS, DescTSS.Legacy.au64));3224 return iemRaiseGeneralProtectionFault(pVCpu, (SelT SS& uSelMask) | uExt);3232 u8Vector, SelTss, DescTSS.Legacy.au64)); 3233 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt); 3225 3234 } 3226 3235 … … 3228 3237 if (!DescTSS.Legacy.Gen.u1Present) 3229 3238 { 3230 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelT SS, DescTSS.Legacy.au64));3231 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelT SS& uSelMask) | uExt);3239 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64)); 3240 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt); 3232 3241 } 3233 3242 … … 3235 3244 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, 3236 3245 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip, 3237 fFlags, uErr, uCr2, SelT SS, &DescTSS);3246 fFlags, uErr, uCr2, SelTss, &DescTSS); 3238 3247 } 3239 3248 … … 3384 3393 3385 3394 /* Create the stack frame. */ 3395 uint8_t bUnmapInfoStackFrame; 3386 3396 RTPTRUNION uStackFrame; 3387 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,3397 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX, 3388 3398 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), 3389 3399 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */ 3390 3400 if (rcStrict != VINF_SUCCESS) 3391 3401 return rcStrict; 3392 void * const pvStackFrame = uStackFrame.pv;3393 3402 if (f32BitGate) 3394 3403 { … … 3429 3438 } 3430 3439 } 3431 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);3440 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); 3432 3441 if (rcStrict != VINF_SUCCESS) 3433 3442 return rcStrict; … … 3487 3496 { 3488 3497 uint64_t uNewRsp; 3498 uint8_t bUnmapInfoStackFrame; 3489 3499 RTPTRUNION uStackFrame; 3490 3500 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate; 3491 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp); 3501 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 
3 : 1, 3502 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp); 3492 3503 if (rcStrict != VINF_SUCCESS) 3493 3504 return rcStrict; 3494 void * const pvStackFrame = uStackFrame.pv;3495 3505 3496 3506 if (f32BitGate) … … 3510 3520 uStackFrame.pu16[2] = fEfl; 3511 3521 } 3512 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */3522 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */ 3513 3523 if (rcStrict != VINF_SUCCESS) 3514 3524 return rcStrict; … … 3747 3757 3748 3758 /* Create the stack frame. */ 3759 uint8_t bUnmapInfoStackFrame; 3749 3760 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR)); 3750 3761 RTPTRUNION uStackFrame; 3751 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,3762 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX, 3752 3763 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */ 3753 3764 if (rcStrict != VINF_SUCCESS) 3754 3765 return rcStrict; 3755 void * const pvStackFrame = uStackFrame.pv;3756 3766 3757 3767 if (fFlags & IEM_XCPT_FLAGS_ERR) … … 3762 3772 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp; 3763 3773 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel; 3764 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);3774 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); 3765 3775 if (rcStrict != VINF_SUCCESS) 3766 3776 return rcStrict; … … 6031 6041 */ 6032 6042 static VBOXSTRICTRC 6033 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) 6043 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, 6044 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) 6034 6045 { 6035 6046 Assert(cbMem <= GUEST_PAGE_SIZE); … … 6158 6169 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 6159 6170 *ppvMem = pbBuf; 6171 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4); 6160 6172 return VINF_SUCCESS; 6161 6173 } … … 6165 6177 * iemMemMap woker that deals with iemMemPageMap failures. 6166 6178 */ 6167 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,6179 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, 6168 6180 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) 6169 6181 { … … 6244 6256 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 6245 6257 *ppvMem = pbBuf; 6258 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4); 6246 6259 return VINF_SUCCESS; 6247 6260 } … … 6263 6276 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6264 6277 * @param ppvMem Where to return the pointer to the mapped memory. 6278 * @param bUnmapInfo Where to return unmap info to be passed to 6279 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when 6280 * done. 6265 6281 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6, 6266 6282 * 8, 12, 16, 32 or 512. When used by string operations … … 6282 6298 * Pass zero to skip alignment. 
6283 6299 */ 6284 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,6300 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, 6285 6301 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT 6286 6302 { … … 6319 6335 { /* likely */ } 6320 6336 else 6321 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);6337 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess); 6322 6338 6323 6339 /* … … 6484 6500 { /* probably likely */ } 6485 6501 else 6486 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,6502 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, 6487 6503 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess, 6488 6504 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED … … 6503 6519 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 6504 6520 if (rcStrict != VINF_SUCCESS) 6505 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);6521 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict); 6506 6522 } 6507 6523 … … 6528 6544 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 6529 6545 if (rcStrict != VINF_SUCCESS) 6530 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);6546 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict); 6531 6547 6532 6548 #endif /* !IEM_WITH_DATA_TLB */ … … 6542 6558 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 6543 6559 *ppvMem = pvMem; 6560 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4); 6561 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf); 6562 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8); 6544 6563 6545 6564 return VINF_SUCCESS; … … 6552 6571 * @returns Strict VBox status code. 6553 6572 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6554 * @param pvMem The mapping. 6555 * @param fAccess The kind of access. 6556 */ 6557 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT 6558 { 6559 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 6560 AssertReturn(iMemMap >= 0, iMemMap); 6573 * @param bUnmapInfo Unmap info set by iemMemMap. 6574 */ 6575 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT 6576 { 6577 uintptr_t const iMemMap = bUnmapInfo & 0x7; 6578 AssertMsgReturn( (bUnmapInfo & 0x08) 6579 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 6580 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4), 6581 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess), 6582 VERR_NOT_FOUND); 6561 6583 6562 6584 /* If it's bounce buffered, we may need to write back the buffer. */ … … 6582 6604 * 6583 6605 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6584 * @param pvMem The mapping. 6585 * @param fAccess The kind of access. 
6586 */ 6587 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT 6588 { 6589 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 6590 AssertReturnVoid(iMemMap >= 0); 6606 * @param bUnmapInfo Unmap info set by iemMemMap. 6607 */ 6608 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT 6609 { 6610 uintptr_t const iMemMap = bUnmapInfo & 0x7; 6611 AssertMsgReturnVoid( (bUnmapInfo & 0x08) 6612 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 6613 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) 6614 == ((unsigned)bUnmapInfo >> 4), 6615 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess)); 6591 6616 6592 6617 /* Unlock it if necessary. */ … … 6616 6641 * 6617 6642 * @param pVCpu The cross context virtual CPU structure of the calling thread. 6643 * @param bUnmapInfo Where to return unmap info to be passed to 6644 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp, 6645 * iemMemCommitAndUnmapWoSafeJmp, 6646 * iemMemCommitAndUnmapRoSafeJmp, 6647 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap 6648 * when done. 6618 6649 * @param cbMem The number of bytes to map. This is usually 1, 6619 6650 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by … … 6636 6667 * Pass zero to skip alignment. 6637 6668 */ 6638 void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,6669 void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess, 6639 6670 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP 6640 6671 { … … 6705 6736 { 6706 6737 void *pvMem; 6707 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);6738 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess); 6708 6739 if (rcStrict == VINF_SUCCESS) 6709 6740 return pvMem; … … 6842 6873 else 6843 6874 { 6844 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,6875 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem, 6845 6876 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess, 6846 6877 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED … … 6866 6897 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock); 6867 6898 if (rcStrict == VINF_SUCCESS) 6899 { 6900 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4); 6868 6901 return pbMem; 6902 } 6869 6903 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); 6870 6904 } … … 6896 6930 else 6897 6931 { 6898 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);6932 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict); 6899 6933 if (rcStrict == VINF_SUCCESS) 6900 6934 return pvMem; … … 6913 6947 6914 6948 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem); 6949 6950 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4); 6915 6951 return pvMem; 6916 6952 } … … 6924 6960 * @param fAccess The kind of access. 
6925 6961 */ 6926 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP 6927 { 6928 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 6929 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap)); 6962 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP 6963 { 6964 uintptr_t const iMemMap = bUnmapInfo & 0x7; 6965 AssertMsgReturnVoid( (bUnmapInfo & 0x08) 6966 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 6967 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) 6968 == ((unsigned)bUnmapInfo >> 4), 6969 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess)); 6930 6970 6931 6971 /* If it's bounce buffered, we may need to write back the buffer. */ … … 6952 6992 6953 6993 /** Fallback for iemMemCommitAndUnmapRwJmp. */ 6954 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP6955 { 6956 Assert( bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);6957 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);6994 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP 6995 { 6996 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE)); 6997 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 6958 6998 } 6959 6999 6960 7000 6961 7001 /** Fallback for iemMemCommitAndUnmapWoJmp. */ 6962 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP6963 { 6964 Assert( bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);6965 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);7002 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP 7003 { 7004 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE); 7005 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 6966 7006 } 6967 7007 6968 7008 6969 7009 /** Fallback for iemMemCommitAndUnmapRoJmp. */ 6970 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP6971 { 6972 Assert( bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);6973 iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);7010 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP 7011 { 7012 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ); 7013 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 6974 7014 } 6975 7015 6976 7016 6977 7017 /** Fallback for iemMemRollbackAndUnmapWo. */ 6978 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT6979 { 6980 Assert( bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);6981 iemMemRollbackAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);7018 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT 7019 { 7020 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE); 7021 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo); 6982 7022 } 6983 7023 … … 6998 7038 * @param fAccess The kind of access. 
6999 7039 */ 7000 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT 7001 { 7002 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess); 7003 AssertReturn(iMemMap >= 0, iMemMap); 7040 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT 7041 { 7042 uintptr_t const iMemMap = bUnmapInfo & 0x7; 7043 AssertMsgReturn( (bUnmapInfo & 0x08) 7044 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings) 7045 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) 7046 == ((unsigned)bUnmapInfo >> 4), 7047 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess), 7048 VERR_NOT_FOUND); 7004 7049 7005 7050 /* If it's bounce buffered, we may need to write back the buffer. */ … … 7131 7176 { 7132 7177 /* The lazy approach for now... */ 7178 uint8_t bUnmapInfo; 7133 7179 uint32_t const *pu32Src; 7134 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,7180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, 7135 7181 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1); 7136 7182 if (rc == VINF_SUCCESS) 7137 7183 { 7138 7184 *pu64Dst = *pu32Src; 7139 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);7185 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7140 7186 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst)); 7141 7187 } … … 7158 7204 { 7159 7205 /* The lazy approach for now... */ 7206 uint8_t bUnmapInfo; 7160 7207 int32_t const *pi32Src; 7161 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,7208 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem, 7162 7209 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1); 7163 7210 if (rc == VINF_SUCCESS) 7164 7211 { 7165 7212 *pu64Dst = *pi32Src; 7166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);7213 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7167 7214 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst)); 7168 7215 } … … 7192 7239 { 7193 7240 /* The lazy approach for now... */ 7241 uint8_t bUnmapInfo; 7194 7242 PCRTUINT128U pu128Src; 7195 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,7243 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, 7196 7244 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7197 7245 if (rc == VINF_SUCCESS) … … 7199 7247 pu128Dst->au64[0] = pu128Src->au64[0]; 7200 7248 pu128Dst->au64[1] = pu128Src->au64[1]; 7201 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);7249 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7202 7250 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst)); 7203 7251 } … … 7223 7271 { 7224 7272 /* The lazy approach for now... 
*/ 7225 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7273 uint8_t bUnmapInfo; 7274 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7226 7275 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7227 7276 pu128Dst->au64[0] = pu128Src->au64[0]; 7228 7277 pu128Dst->au64[1] = pu128Src->au64[1]; 7229 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);7278 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7230 7279 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst)); 7231 7280 } … … 7246 7295 { 7247 7296 /* The lazy approach for now... */ 7297 uint8_t bUnmapInfo; 7248 7298 PCRTUINT256U pu256Src; 7249 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,7299 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, 7250 7300 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 7251 7301 if (rc == VINF_SUCCESS) … … 7255 7305 pu256Dst->au64[2] = pu256Src->au64[2]; 7256 7306 pu256Dst->au64[3] = pu256Src->au64[3]; 7257 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);7307 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7258 7308 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7259 7309 } … … 7275 7325 { 7276 7326 /* The lazy approach for now... */ 7277 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, 7327 uint8_t bUnmapInfo; 7328 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, 7278 7329 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */); 7279 7330 pu256Dst->au64[0] = pu256Src->au64[0]; … … 7281 7332 pu256Dst->au64[2] = pu256Src->au64[2]; 7282 7333 pu256Dst->au64[3] = pu256Src->au64[3]; 7283 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);7334 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7284 7335 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7285 7336 } … … 7303 7354 { 7304 7355 /* The lazy approach for now... */ 7356 uint8_t bUnmapInfo; 7305 7357 PCRTUINT256U pu256Src; 7306 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,7358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, 7307 7359 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7308 7360 if (rc == VINF_SUCCESS) … … 7312 7364 pu256Dst->au64[2] = pu256Src->au64[2]; 7313 7365 pu256Dst->au64[3] = pu256Src->au64[3]; 7314 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);7366 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7315 7367 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7316 7368 } … … 7336 7388 { 7337 7389 /* The lazy approach for now... 
*/ 7338 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7390 uint8_t bUnmapInfo; 7391 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7339 7392 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7340 7393 pu256Dst->au64[0] = pu256Src->au64[0]; … … 7342 7395 pu256Dst->au64[2] = pu256Src->au64[2]; 7343 7396 pu256Dst->au64[3] = pu256Src->au64[3]; 7344 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);7397 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7345 7398 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7346 7399 } … … 7433 7486 { 7434 7487 /* The lazy approach for now... */ 7435 PRTUINT128U pu128Dst; 7436 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7488 uint8_t bUnmapInfo; 7489 PRTUINT128U pu128Dst; 7490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7437 7491 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7438 7492 if (rc == VINF_SUCCESS) … … 7440 7494 pu128Dst->au64[0] = u128Value.au64[0]; 7441 7495 pu128Dst->au64[1] = u128Value.au64[1]; 7442 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);7496 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7443 7497 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst)); 7444 7498 } … … 7462 7516 { 7463 7517 /* The lazy approach for now... */ 7464 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7518 uint8_t bUnmapInfo; 7519 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 7465 7520 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE); 7466 7521 pu128Dst->au64[0] = u128Value.au64[0]; 7467 7522 pu128Dst->au64[1] = u128Value.au64[1]; 7468 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);7523 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7469 7524 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst)); 7470 7525 } … … 7485 7540 { 7486 7541 /* The lazy approach for now... */ 7487 PRTUINT256U pu256Dst; 7488 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7542 uint8_t bUnmapInfo; 7543 PRTUINT256U pu256Dst; 7544 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7489 7545 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7490 7546 if (rc == VINF_SUCCESS) … … 7494 7550 pu256Dst->au64[2] = pu256Value->au64[2]; 7495 7551 pu256Dst->au64[3] = pu256Value->au64[3]; 7496 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);7552 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7497 7553 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7498 7554 } … … 7514 7570 { 7515 7571 /* The lazy approach for now... 
*/ 7516 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7572 uint8_t bUnmapInfo; 7573 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7517 7574 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */); 7518 7575 pu256Dst->au64[0] = pu256Value->au64[0]; … … 7520 7577 pu256Dst->au64[2] = pu256Value->au64[2]; 7521 7578 pu256Dst->au64[3] = pu256Value->au64[3]; 7522 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);7579 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7523 7580 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7524 7581 } … … 7539 7596 { 7540 7597 /* The lazy approach for now... */ 7541 PRTUINT256U pu256Dst; 7542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7598 uint8_t bUnmapInfo; 7599 PRTUINT256U pu256Dst; 7600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7543 7601 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP); 7544 7602 if (rc == VINF_SUCCESS) … … 7548 7606 pu256Dst->au64[2] = pu256Value->au64[2]; 7549 7607 pu256Dst->au64[3] = pu256Value->au64[3]; 7550 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);7608 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7551 7609 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7552 7610 } … … 7570 7628 { 7571 7629 /* The lazy approach for now... */ 7572 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7630 uint8_t bUnmapInfo; 7631 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem, 7573 7632 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP); 7574 7633 pu256Dst->au64[0] = pu256Value->au64[0]; … … 7576 7635 pu256Dst->au64[2] = pu256Value->au64[2]; 7577 7636 pu256Dst->au64[3] = pu256Value->au64[3]; 7578 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);7637 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 7579 7638 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst)); 7580 7639 } … … 7630 7689 * don't commit register until the commit call 7631 7690 * succeeds. 7691 * @param pbUnmapInfo Where to store unmap info for 7692 * iemMemStackPushCommitSpecial. 7632 7693 * @param puNewRsp Where to return the new RSP value. This must be 7633 7694 * passed unchanged to … … 7635 7696 */ 7636 7697 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 7637 void **ppvMem, uint 64_t *puNewRsp) RT_NOEXCEPT7698 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT 7638 7699 { 7639 7700 Assert(cbMem < UINT8_MAX); 7640 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp); 7641 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, 7642 IEM_ACCESS_STACK_W, cbAlign); 7701 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp); 7702 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign); 7643 7703 } 7644 7704 … … 7656 7716 * iemMemStackPushBeginSpecial(). 
7657 7717 */ 7658 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT7659 { 7660 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);7718 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT 7719 { 7720 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7661 7721 if (rcStrict == VINF_SUCCESS) 7662 7722 pVCpu->cpum.GstCtx.rsp = uNewRsp; … … 7675 7735 * @param cbAlign The alignment mask (7, 3, 1). 7676 7736 * @param ppvMem Where to return the pointer to the stack memory. 7737 * @param pbUnmapInfo Where to store unmap info for 7738 * iemMemStackPopDoneSpecial. 7677 7739 * @param puNewRsp Where to return the new RSP value. This must be 7678 7740 * assigned to CPUMCTX::rsp manually some time … … 7681 7743 */ 7682 7744 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 7683 void const **ppvMem, uint 64_t *puNewRsp) RT_NOEXCEPT7745 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT 7684 7746 { 7685 7747 Assert(cbMem < UINT8_MAX); 7686 7748 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp); 7687 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);7749 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign); 7688 7750 } 7689 7751 … … 7701 7763 * @param cbMem The number of bytes to pop from the stack. 7702 7764 * @param ppvMem Where to return the pointer to the stack memory. 7765 * @param pbUnmapInfo Where to store unmap info for 7766 * iemMemStackPopDoneSpecial. 7703 7767 * @param uCurNewRsp The current uncommitted RSP value. (No need to 7704 7768 * return this because all use of this function is … … 7707 7771 */ 7708 7772 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem, 7709 void const **ppvMem, uint 64_t uCurNewRsp) RT_NOEXCEPT7773 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT 7710 7774 { 7711 7775 Assert(cbMem < UINT8_MAX); … … 7720 7784 GCPtrTop = (uint16_t)uCurNewRsp; 7721 7785 7722 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,7786 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R, 7723 7787 0 /* checked in iemMemStackPopBeginSpecial */); 7724 7788 } … … 7733 7797 * @returns Strict VBox status code. 7734 7798 * @param pVCpu The cross context virtual CPU structure of the calling thread. 7735 * @param pvMem The pointerreturned by7799 * @param bUnmapInfo Unmap information returned by 7736 7800 * iemMemStackPopBeginSpecial() or 7737 7801 * iemMemStackPopContinueSpecial(). 7738 7802 */ 7739 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT7740 { 7741 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);7803 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT 7804 { 7805 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7742 7806 } 7743 7807 … … 7756 7820 { 7757 7821 /* The lazy approach for now... 
*/ 7822 uint8_t bUnmapInfo; 7758 7823 uint8_t const *pbSrc; 7759 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);7824 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 7760 7825 if (rc == VINF_SUCCESS) 7761 7826 { 7762 7827 *pbDst = *pbSrc; 7763 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);7828 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7764 7829 } 7765 7830 return rc; … … 7780 7845 { 7781 7846 /* The lazy approach for now... */ 7847 uint8_t bUnmapInfo; 7782 7848 uint16_t const *pu16Src; 7783 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);7849 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 7784 7850 if (rc == VINF_SUCCESS) 7785 7851 { 7786 7852 *pu16Dst = *pu16Src; 7787 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);7853 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7788 7854 } 7789 7855 return rc; … … 7804 7870 { 7805 7871 /* The lazy approach for now... */ 7872 uint8_t bUnmapInfo; 7806 7873 uint32_t const *pu32Src; 7807 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);7874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 7808 7875 if (rc == VINF_SUCCESS) 7809 7876 { 7810 7877 *pu32Dst = *pu32Src; 7811 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);7878 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7812 7879 } 7813 7880 return rc; … … 7828 7895 { 7829 7896 /* The lazy approach for now... */ 7897 uint8_t bUnmapInfo; 7830 7898 uint64_t const *pu64Src; 7831 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);7899 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0); 7832 7900 if (rc == VINF_SUCCESS) 7833 7901 { 7834 7902 *pu64Dst = *pu64Src; 7835 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);7903 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7836 7904 } 7837 7905 return rc; … … 7965 8033 */ 7966 8034 VBOXSTRICTRC rcStrict; 8035 uint8_t bUnmapInfo; 7967 8036 uint32_t volatile *pu32; 7968 8037 if ((GCPtr & 3) == 0) … … 7970 8039 /* The normal case, map the 32-bit bits around the accessed bit (40). */ 7971 8040 GCPtr += 2 + 2; 7972 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8041 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0); 7973 8042 if (rcStrict != VINF_SUCCESS) 7974 8043 return rcStrict; … … 7978 8047 { 7979 8048 /* The misaligned GDT/LDT case, map the whole thing. */ 7980 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8049 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0); 7981 8050 if (rcStrict != VINF_SUCCESS) 7982 8051 return rcStrict; … … 7990 8059 } 7991 8060 7992 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);8061 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 7993 8062 } 7994 8063 -
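The recurring pattern in the IEMAll.cpp hunks above is that the (pvMem, fAccess) pair previously passed to iemMemCommitAndUnmap / iemMemRollbackAndUnmap is replaced by a single bUnmapInfo byte that iemMemMap now fills in through a new pbUnmapInfo output parameter. Judging by the assignments visible in the diff (`*pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4)`) and the matching asserts on the unmap side, the byte packs the mapping slot index in bits 0-2, a validity bit in bit 3, and the access type in the high nibble. The standalone sketch below only illustrates that packing and the corresponding sanity check; the IEM_ACCESS_TYPE_* values are simplified stand-ins for this illustration, not the real VBox definitions.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real IEM_ACCESS_TYPE_* flags (assumption;
   only the "fits in a nibble" property matters for this sketch). */
#define IEM_ACCESS_TYPE_READ   0x1
#define IEM_ACCESS_TYPE_WRITE  0x2
#define IEM_ACCESS_TYPE_MASK   0xf

/* Pack a mapping slot index and the access type into one unmap-info byte,
   mirroring the assignments in the diff:
   bits 0-2 = slot index, bit 3 = valid, bits 4-7 = access type. */
static uint8_t PackUnmapInfo(unsigned iMemMap, uint32_t fAccess)
{
    assert(iMemMap < 8); /* the diff asserts RT_ELEMENTS(aMemMappings) < 8 */
    return (uint8_t)(iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4));
}

/* Recover the slot index, validating the byte roughly the way the new
   iemMemCommitAndUnmap does before trusting it. Returns -1 if invalid. */
static int UnpackUnmapInfo(uint8_t bUnmapInfo, uint32_t fSlotAccess)
{
    unsigned const iMemMap = bUnmapInfo & 0x7;
    if (   (bUnmapInfo & 0x08)
        && (fSlotAccess & IEM_ACCESS_TYPE_MASK) == (unsigned)(bUnmapInfo >> 4))
        return (int)iMemMap;
    return -1;
}

int main(void)
{
    uint32_t const fAccess = IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE;
    uint8_t  const bInfo   = PackUnmapInfo(2, fAccess);
    printf("packed unmap info: %#x -> slot %d\n", bInfo, UnpackUnmapInfo(bInfo, fAccess));
    return 0;
}
```

With that byte in hand, call sites change from `iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_DATA_R)` to `iemMemCommitAndUnmap(pVCpu, bUnmapInfo)`, and `iemMemMap`/`iemMemMapJmp` gain the extra `pbUnmapInfo` output parameter, as seen throughout both files in this changeset.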
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r101450 r102430 432 432 else 433 433 { 434 uint16_t const *pa16Mem = NULL; 435 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa16Mem) - 1); 434 uint8_t bUnmapInfo; 435 uint16_t const *pau16Mem = NULL; 436 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrStart, 437 IEM_ACCESS_STACK_R, sizeof(*pau16Mem) - 1); 436 438 if (rcStrict == VINF_SUCCESS) 437 439 { 438 pVCpu->cpum.GstCtx.di = pa 16Mem[7 - X86_GREG_xDI];439 pVCpu->cpum.GstCtx.si = pa 16Mem[7 - X86_GREG_xSI];440 pVCpu->cpum.GstCtx.bp = pa 16Mem[7 - X86_GREG_xBP];440 pVCpu->cpum.GstCtx.di = pau16Mem[7 - X86_GREG_xDI]; 441 pVCpu->cpum.GstCtx.si = pau16Mem[7 - X86_GREG_xSI]; 442 pVCpu->cpum.GstCtx.bp = pau16Mem[7 - X86_GREG_xBP]; 441 443 /* skip sp */ 442 pVCpu->cpum.GstCtx.bx = pa 16Mem[7 - X86_GREG_xBX];443 pVCpu->cpum.GstCtx.dx = pa 16Mem[7 - X86_GREG_xDX];444 pVCpu->cpum.GstCtx.cx = pa 16Mem[7 - X86_GREG_xCX];445 pVCpu->cpum.GstCtx.ax = pa 16Mem[7 - X86_GREG_xAX];446 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);444 pVCpu->cpum.GstCtx.bx = pau16Mem[7 - X86_GREG_xBX]; 445 pVCpu->cpum.GstCtx.dx = pau16Mem[7 - X86_GREG_xDX]; 446 pVCpu->cpum.GstCtx.cx = pau16Mem[7 - X86_GREG_xCX]; 447 pVCpu->cpum.GstCtx.ax = pau16Mem[7 - X86_GREG_xAX]; 448 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 447 449 if (rcStrict == VINF_SUCCESS) 448 450 { … … 512 514 else 513 515 { 514 uint32_t const *pa32Mem; 515 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R, sizeof(*pa32Mem) - 1); 516 uint8_t bUnmapInfo; 517 uint32_t const *pau32Mem; 518 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrStart, 519 IEM_ACCESS_STACK_R, sizeof(*pau32Mem) - 1); 516 520 if (rcStrict == VINF_SUCCESS) 517 521 { 518 pVCpu->cpum.GstCtx.rdi = pa 32Mem[7 - X86_GREG_xDI];519 pVCpu->cpum.GstCtx.rsi = pa 32Mem[7 - X86_GREG_xSI];520 pVCpu->cpum.GstCtx.rbp = pa 32Mem[7 - X86_GREG_xBP];522 pVCpu->cpum.GstCtx.rdi = pau32Mem[7 - X86_GREG_xDI]; 523 pVCpu->cpum.GstCtx.rsi = pau32Mem[7 - X86_GREG_xSI]; 524 pVCpu->cpum.GstCtx.rbp = pau32Mem[7 - X86_GREG_xBP]; 521 525 /* skip esp */ 522 pVCpu->cpum.GstCtx.rbx = pa 32Mem[7 - X86_GREG_xBX];523 pVCpu->cpum.GstCtx.rdx = pa 32Mem[7 - X86_GREG_xDX];524 pVCpu->cpum.GstCtx.rcx = pa 32Mem[7 - X86_GREG_xCX];525 pVCpu->cpum.GstCtx.rax = pa 32Mem[7 - X86_GREG_xAX];526 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);526 pVCpu->cpum.GstCtx.rbx = pau32Mem[7 - X86_GREG_xBX]; 527 pVCpu->cpum.GstCtx.rdx = pau32Mem[7 - X86_GREG_xDX]; 528 pVCpu->cpum.GstCtx.rcx = pau32Mem[7 - X86_GREG_xCX]; 529 pVCpu->cpum.GstCtx.rax = pau32Mem[7 - X86_GREG_xAX]; 530 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 527 531 if (rcStrict == VINF_SUCCESS) 528 532 { … … 583 587 { 584 588 GCPtrBottom--; 585 uint16_t *pa16Mem = NULL; 586 rcStrict = iemMemMap(pVCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa16Mem) - 1); 589 uint8_t bUnmapInfo; 590 uint16_t *pau16Mem = NULL; 591 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrBottom, 592 IEM_ACCESS_STACK_W, sizeof(*pau16Mem) - 1); 587 593 if (rcStrict == VINF_SUCCESS) 588 594 { 589 pa 16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;590 pa 16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;591 pa 16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;592 pa 16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;593 pa 16Mem[7 - X86_GREG_xBX] = 
pVCpu->cpum.GstCtx.bx;594 pa 16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;595 pa 16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;596 pa 16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;597 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);595 pau16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di; 596 pau16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si; 597 pau16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp; 598 pau16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp; 599 pau16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx; 600 pau16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx; 601 pau16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx; 602 pau16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax; 603 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 598 604 if (rcStrict == VINF_SUCCESS) 599 605 { … … 654 660 { 655 661 GCPtrBottom--; 656 uint32_t *pa32Mem; 657 rcStrict = iemMemMap(pVCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W, sizeof(*pa32Mem) - 1); 662 uint8_t bUnmapInfo; 663 uint32_t *pau32Mem; 664 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrBottom, 665 IEM_ACCESS_STACK_W, sizeof(*pau32Mem) - 1); 658 666 if (rcStrict == VINF_SUCCESS) 659 667 { 660 pa 32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;661 pa 32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;662 pa 32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;663 pa 32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;664 pa 32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;665 pa 32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;666 pa 32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;667 pa 32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;668 rcStrict = iemMemCommitAndUnmap(pVCpu, pa32Mem, IEM_ACCESS_STACK_W);668 pau32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi; 669 pau32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi; 670 pau32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp; 671 pau32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp; 672 pau32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx; 673 pau32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx; 674 pau32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx; 675 pau32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax; 676 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 669 677 if (rcStrict == VINF_SUCCESS) 670 678 { … … 1379 1387 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu))) 1380 1388 { 1381 uint16_t offNewStack; /* Offset of new stack in TSS. */1382 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */1383 uint8_t uNewCSDpl;1384 uint8_t cbWords;1385 RTSEL uNewSS;1386 RTSEL uOldSS;1387 uint64_t uOldRsp;1388 IEMSELDESC DescSS;1389 RTPTRUNION uPtrTSS;1390 RTGCPTR GCPtrTSS;1391 RTPTRUNION uPtrParmWds;1392 RTGCPTR GCPtrParmWds;1393 1394 1389 /* More privilege. This is the fun part. */ 1395 1390 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */ … … 1401 1396 1402 1397 /* Figure out where the new stack pointer is stored in the TSS. */ 1403 uNewCSDpl = DescCS.Legacy.Gen.u2Dpl; 1398 uint8_t const uNewCSDpl = DescCS.Legacy.Gen.u2Dpl; 1399 uint16_t offNewStack; /* Offset of new stack in TSS. */ 1400 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. 
*/ 1404 1401 if (!IEM_IS_LONG_MODE(pVCpu)) 1405 1402 { … … 1430 1427 } 1431 1428 1432 GCPtrTSS = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack; 1433 rcStrict = iemMemMap(pVCpu, &uPtrTSS.pv, cbNewStack, UINT8_MAX, GCPtrTSS, IEM_ACCESS_SYS_R, 0); 1429 uint8_t bUnmapInfo; 1430 RTPTRUNION uPtrTss; 1431 RTGCPTR GCPtrTss = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack; 1432 rcStrict = iemMemMap(pVCpu, &uPtrTss.pv, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrTss, IEM_ACCESS_SYS_R, 0); 1434 1433 if (rcStrict != VINF_SUCCESS) 1435 1434 { … … 1438 1437 } 1439 1438 1439 RTSEL uNewSS; 1440 1440 if (!IEM_IS_LONG_MODE(pVCpu)) 1441 1441 { 1442 1442 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY) 1443 1443 { 1444 uNewRsp = uPtrT SS.pu32[0];1445 uNewSS = uPtrT SS.pu16[2];1444 uNewRsp = uPtrTss.pu32[0]; 1445 uNewSS = uPtrTss.pu16[2]; 1446 1446 } 1447 1447 else 1448 1448 { 1449 1449 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY); 1450 uNewRsp = uPtrT SS.pu16[0];1451 uNewSS = uPtrT SS.pu16[1];1450 uNewRsp = uPtrTss.pu16[0]; 1451 uNewSS = uPtrTss.pu16[1]; 1452 1452 } 1453 1453 } … … 1456 1456 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY); 1457 1457 /* SS will be a NULL selector, but that's valid. */ 1458 uNewRsp = uPtrT SS.pu64[0];1458 uNewRsp = uPtrTss.pu64[0]; 1459 1459 uNewSS = uNewCSDpl; 1460 1460 } 1461 1461 1462 1462 /* Done with the TSS now. */ 1463 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrTSS.pv, IEM_ACCESS_SYS_R);1463 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1464 1464 if (rcStrict != VINF_SUCCESS) 1465 1465 { … … 1469 1469 1470 1470 /* Only used outside of long mode. */ 1471 cbWords = pDesc->Legacy.Gate.u5ParmCount;1471 uint8_t const cbWords = pDesc->Legacy.Gate.u5ParmCount; 1472 1472 1473 1473 /* If EFER.LMA is 0, there's extra work to do. */ 1474 IEMSELDESC DescSS; 1474 1475 if (!IEM_IS_LONG_MODE(pVCpu)) 1475 1476 { … … 1548 1549 1549 1550 /* Remember the old SS:rSP and their linear address. */ 1550 uOldSS = pVCpu->cpum.GstCtx.ss.Sel;1551 u OldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;1552 1553 GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;1551 RTSEL const uOldSS = pVCpu->cpum.GstCtx.ss.Sel; 1552 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp; 1553 1554 RTGCPTR const GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp; 1554 1555 1555 1556 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS) … … 1560 1561 void *pvNewFrame; 1561 1562 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack; 1562 rcStrict = iemMemMap(pVCpu, &pvNewFrame, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);1563 rcStrict = iemMemMap(pVCpu, &pvNewFrame, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0); 1563 1564 if (rcStrict != VINF_SUCCESS) 1564 1565 { … … 1566 1567 return rcStrict; 1567 1568 } 1568 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewFrame, IEM_ACCESS_SYS_RW);1569 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1569 1570 if (rcStrict != VINF_SUCCESS) 1570 1571 { … … 1587 1588 /* At this point the stack access must not fail because new state was already committed. */ 1588 1589 /** @todo this can still fail due to SS.LIMIT not check. */ 1590 uint8_t bUnmapInfoRet; 1589 1591 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack, 1590 1592 IEM_IS_LONG_MODE(pVCpu) ? 
7 1591 1593 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1, 1592 &uPtrRet.pv, & uNewRsp);1594 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp); 1593 1595 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)), 1594 1596 VERR_INTERNAL_ERROR_5); … … 1601 1603 { 1602 1604 /* Map the relevant chunk of the old stack. */ 1603 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, 1605 RTPTRUNION uPtrParmWds; 1606 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 4, UINT8_MAX, GCPtrParmWds, 1604 1607 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */); 1605 1608 if (rcStrict != VINF_SUCCESS) … … 1614 1617 1615 1618 /* Unmap the old stack. */ 1616 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);1619 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1617 1620 if (rcStrict != VINF_SUCCESS) 1618 1621 { … … 1637 1640 { 1638 1641 /* Map the relevant chunk of the old stack. */ 1639 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, 1642 RTPTRUNION uPtrParmWds; 1643 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 2, UINT8_MAX, GCPtrParmWds, 1640 1644 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */); 1641 1645 if (rcStrict != VINF_SUCCESS) … … 1650 1654 1651 1655 /* Unmap the old stack. */ 1652 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtrParmWds.pv, IEM_ACCESS_DATA_R);1656 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1653 1657 if (rcStrict != VINF_SUCCESS) 1654 1658 { … … 1678 1682 } 1679 1683 1680 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);1684 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp); 1681 1685 if (rcStrict != VINF_SUCCESS) 1682 1686 { … … 1744 1748 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in 1745 1749 * 16-bit code cause a two or four byte CS to be pushed? */ 1750 uint8_t bUnmapInfoRet; 1746 1751 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 1747 1752 IEM_IS_LONG_MODE(pVCpu) ? 8+8 … … 1749 1754 IEM_IS_LONG_MODE(pVCpu) ? 7 1750 1755 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2, 1751 &uPtrRet.pv, & uNewRsp);1756 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp); 1752 1757 if (rcStrict != VINF_SUCCESS) 1753 1758 return rcStrict; … … 1815 1820 } 1816 1821 1817 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);1822 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp); 1818 1823 if (rcStrict != VINF_SUCCESS) 1819 1824 return rcStrict; … … 2105 2110 uint64_t uNewRsp; 2106 2111 RTPTRUNION uPtrRet; 2112 uint8_t bUnmapInfo; 2107 2113 2108 2114 /* … … 2119 2125 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2, 2120 2126 enmEffOpSize == IEMMODE_32BIT ? 3 : 1, 2121 &uPtrRet.pv, & uNewRsp);2127 &uPtrRet.pv, &bUnmapInfo, &uNewRsp); 2122 2128 if (rcStrict != VINF_SUCCESS) 2123 2129 return rcStrict; … … 2139 2145 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel; 2140 2146 } 2141 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);2147 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); 2142 2148 if (rcStrict != VINF_SUCCESS) 2143 2149 return rcStrict; … … 2228 2234 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2, 2229 2235 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 
3 : 1, 2230 &uPtrRet.pv, & uNewRsp);2236 &uPtrRet.pv, &bUnmapInfo, &uNewRsp); 2231 2237 if (rcStrict != VINF_SUCCESS) 2232 2238 return rcStrict; … … 2290 2296 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */ 2291 2297 } 2292 rcStrict = iemMemStackPushCommitSpecial(pVCpu, uPtrRet.pv, uNewRsp);2298 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); 2293 2299 if (rcStrict != VINF_SUCCESS) 2294 2300 return rcStrict; … … 2325 2331 IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop) 2326 2332 { 2327 VBOXSTRICTRC rcStrict; 2333 NOREF(cbInstr); 2334 2335 /* 2336 * Read the stack values first. 2337 */ 2338 RTUINT64U NewRsp; 2339 uint8_t bUnmapInfo; 2328 2340 RTCPTRUNION uPtrFrame; 2329 RTUINT64U NewRsp; 2341 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2 2342 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8; 2343 VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr, 2344 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7, 2345 &uPtrFrame.pv, &bUnmapInfo, &NewRsp.u); 2346 if (rcStrict != VINF_SUCCESS) 2347 return rcStrict; 2348 2330 2349 uint64_t uNewRip; 2331 2350 uint16_t uNewCs; 2332 NOREF(cbInstr);2333 2334 /*2335 * Read the stack values first.2336 */2337 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+22338 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;2339 rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,2340 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,2341 &uPtrFrame.pv, &NewRsp.u);2342 if (rcStrict != VINF_SUCCESS)2343 return rcStrict;2344 2351 if (enmEffOpSize == IEMMODE_16BIT) 2345 2352 { … … 2357 2364 uNewCs = uPtrFrame.pu16[4]; 2358 2365 } 2359 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv); 2366 2367 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); 2360 2368 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 2361 2369 { /* extremely likely */ } … … 2464 2472 { 2465 2473 /* Read the outer stack pointer stored *after* the parameters. 
*/ 2466 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, NewRsp.u);2474 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, &bUnmapInfo, NewRsp.u); 2467 2475 if (rcStrict != VINF_SUCCESS) 2468 2476 return rcStrict; … … 2485 2493 uNewOuterSs = uPtrFrame.pu16[4]; 2486 2494 } 2487 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv);2495 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); 2488 2496 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 2489 2497 { /* extremely likely */ } … … 3103 3111 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT); 3104 3112 VBOXSTRICTRC rcStrict; 3113 uint8_t bUnmapInfo; 3105 3114 RTCPTRUNION uFrame; 3106 3115 uint16_t uNewCs; … … 3110 3119 if (enmEffOpSize == IEMMODE_32BIT) 3111 3120 { 3112 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, & uNewRsp);3121 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3113 3122 if (rcStrict != VINF_SUCCESS) 3114 3123 return rcStrict; … … 3129 3138 else 3130 3139 { 3131 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, & uNewRsp);3140 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3132 3141 if (rcStrict != VINF_SUCCESS) 3133 3142 return rcStrict; … … 3144 3153 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL); 3145 3154 } 3146 rcStrict = iemMemStackPopDoneSpecial(pVCpu, uFrame.pv);3155 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); 3147 3156 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 3148 3157 { /* extremely likely */ } … … 3251 3260 * Pop the V8086 specific frame bits off the stack. 3252 3261 */ 3253 VBOXSTRICTRC rcStrict;3254 RTCPTRUNION 3255 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, uNewRsp);3262 uint8_t bUnmapInfo; 3263 RTCPTRUNION uFrame; 3264 VBOXSTRICTRC rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp); 3256 3265 if (rcStrict != VINF_SUCCESS) 3257 3266 return rcStrict; … … 3262 3271 uint16_t uNewFs = uFrame.pu32[4]; 3263 3272 uint16_t uNewGs = uFrame.pu32[5]; 3264 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */3273 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */ 3265 3274 if (rcStrict != VINF_SUCCESS) 3266 3275 return rcStrict; … … 3382 3391 */ 3383 3392 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT); 3393 uint8_t bUnmapInfo; 3384 3394 VBOXSTRICTRC rcStrict; 3385 3395 RTCPTRUNION uFrame; … … 3390 3400 if (enmEffOpSize == IEMMODE_32BIT) 3391 3401 { 3392 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, & uNewRsp);3402 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3393 3403 if (rcStrict != VINF_SUCCESS) 3394 3404 return rcStrict; … … 3399 3409 else 3400 3410 { 3401 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, & uNewRsp);3411 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3402 3412 if (rcStrict != VINF_SUCCESS) 3403 3413 return rcStrict; … … 3406 3416 uNewFlags = uFrame.pu16[2]; 3407 3417 } 3408 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */3418 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. 
*/ 3409 3419 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 3410 3420 { /* extremely likely */ } … … 3491 3501 if (enmEffOpSize == IEMMODE_32BIT) 3492 3502 { 3493 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, uNewRsp);3503 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp); 3494 3504 if (rcStrict != VINF_SUCCESS) 3495 3505 return rcStrict; … … 3502 3512 else 3503 3513 { 3504 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, uNewRsp);3514 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp); 3505 3515 if (rcStrict != VINF_SUCCESS) 3506 3516 return rcStrict; … … 3508 3518 uNewSS = uFrame.pu16[1]; 3509 3519 } 3510 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);3520 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 3511 3521 if (rcStrict != VINF_SUCCESS) 3512 3522 return rcStrict; … … 3737 3747 */ 3738 3748 VBOXSTRICTRC rcStrict; 3749 uint8_t bUnmapInfo; 3739 3750 RTCPTRUNION uFrame; 3740 3751 uint64_t uNewRip; … … 3745 3756 if (enmEffOpSize == IEMMODE_64BIT) 3746 3757 { 3747 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, & uNewRsp);3758 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3748 3759 if (rcStrict != VINF_SUCCESS) 3749 3760 return rcStrict; … … 3756 3767 else if (enmEffOpSize == IEMMODE_32BIT) 3757 3768 { 3758 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, & uNewRsp);3769 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3759 3770 if (rcStrict != VINF_SUCCESS) 3760 3771 return rcStrict; … … 3768 3779 { 3769 3780 Assert(enmEffOpSize == IEMMODE_16BIT); 3770 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, & uNewRsp);3781 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp); 3771 3782 if (rcStrict != VINF_SUCCESS) 3772 3783 return rcStrict; … … 3777 3788 uNewSs = uFrame.pu16[4]; 3778 3789 } 3779 rcStrict = iemMemStackPopDoneSpecial(pVCpu, (void *)uFrame.pv); /* don't use iemMemStackPopCommitSpecial here. */3790 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */ 3780 3791 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 3781 3792 { /* extremely like */ } … … 4152 4163 } 4153 4164 4154 uint8_t const *pbMem = NULL; 4155 uint16_t const *pa16Mem; 4156 uint8_t const *pa8Mem; 4157 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */ 4158 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0); 4165 uint8_t bUnmapInfo; 4166 uint8_t const *pbMem = NULL; 4167 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */ 4168 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, &bUnmapInfo, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0); 4159 4169 if (rcStrict != VINF_SUCCESS) 4160 4170 return rcStrict; 4161 4171 4162 4172 /* The MSW is at offset 0x06. */ 4163 pa16Mem = (uint16_t const *)(pbMem + 0x06);4173 uint16_t const *pau16Mem = (uint16_t const *)(pbMem + 0x06); 4164 4174 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. 
*/ 4165 4175 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 4166 uNewCr0 |= *pa 16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);4176 uNewCr0 |= *pau16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 4167 4177 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0; 4168 4178 … … 4181 4191 4182 4192 /* TR selector is at offset 0x16. */ 4183 pa 16Mem = (uint16_t const *)(pbMem + 0x16);4184 pVCpu->cpum.GstCtx.tr.Sel = pa 16Mem[0];4185 pVCpu->cpum.GstCtx.tr.ValidSel = pa 16Mem[0];4193 pau16Mem = (uint16_t const *)(pbMem + 0x16); 4194 pVCpu->cpum.GstCtx.tr.Sel = pau16Mem[0]; 4195 pVCpu->cpum.GstCtx.tr.ValidSel = pau16Mem[0]; 4186 4196 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID; 4187 4197 4188 4198 /* Followed by FLAGS... */ 4189 pVCpu->cpum.GstCtx.eflags.u = pa 16Mem[1] | X86_EFL_1;4190 pVCpu->cpum.GstCtx.ip = pa 16Mem[2]; /* ...and IP. */4199 pVCpu->cpum.GstCtx.eflags.u = pau16Mem[1] | X86_EFL_1; 4200 pVCpu->cpum.GstCtx.ip = pau16Mem[2]; /* ...and IP. */ 4191 4201 4192 4202 /* LDT is at offset 0x1C. */ 4193 pa 16Mem = (uint16_t const *)(pbMem + 0x1C);4194 pVCpu->cpum.GstCtx.ldtr.Sel = pa 16Mem[0];4195 pVCpu->cpum.GstCtx.ldtr.ValidSel = pa 16Mem[0];4203 pau16Mem = (uint16_t const *)(pbMem + 0x1C); 4204 pVCpu->cpum.GstCtx.ldtr.Sel = pau16Mem[0]; 4205 pVCpu->cpum.GstCtx.ldtr.ValidSel = pau16Mem[0]; 4196 4206 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 4197 4207 4198 4208 /* Segment registers are at offset 0x1E. */ 4199 pa 16Mem = (uint16_t const *)(pbMem + 0x1E);4200 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pa 16Mem[0]);4201 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pa 16Mem[1]);4202 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pa 16Mem[2]);4203 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pa 16Mem[3]);4209 pau16Mem = (uint16_t const *)(pbMem + 0x1E); 4210 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pau16Mem[0]); 4211 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pau16Mem[1]); 4212 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pau16Mem[2]); 4213 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pau16Mem[3]); 4204 4214 4205 4215 /* GPRs are at offset 0x26. */ 4206 pa 16Mem = (uint16_t const *)(pbMem + 0x26);4207 pVCpu->cpum.GstCtx.di = pa 16Mem[0];4208 pVCpu->cpum.GstCtx.si = pa 16Mem[1];4209 pVCpu->cpum.GstCtx.bp = pa 16Mem[2];4210 pVCpu->cpum.GstCtx.sp = pa 16Mem[3];4211 pVCpu->cpum.GstCtx.bx = pa 16Mem[4];4212 pVCpu->cpum.GstCtx.dx = pa 16Mem[5];4213 pVCpu->cpum.GstCtx.cx = pa 16Mem[6];4214 pVCpu->cpum.GstCtx.ax = pa 16Mem[7];4216 pau16Mem = (uint16_t const *)(pbMem + 0x26); 4217 pVCpu->cpum.GstCtx.di = pau16Mem[0]; 4218 pVCpu->cpum.GstCtx.si = pau16Mem[1]; 4219 pVCpu->cpum.GstCtx.bp = pau16Mem[2]; 4220 pVCpu->cpum.GstCtx.sp = pau16Mem[3]; 4221 pVCpu->cpum.GstCtx.bx = pau16Mem[4]; 4222 pVCpu->cpum.GstCtx.dx = pau16Mem[5]; 4223 pVCpu->cpum.GstCtx.cx = pau16Mem[6]; 4224 pVCpu->cpum.GstCtx.ax = pau16Mem[7]; 4215 4225 4216 4226 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */ … … 4221 4231 4222 4232 /* GDTR contents are at offset 0x4E, 6 bytes. */ 4223 RTGCPHYS GCPtrBase; 4224 uint16_t cbLimit; 4225 pa8Mem = pbMem + 0x4E; 4233 uint8_t const *pau8Mem = pbMem + 0x4E; 4226 4234 /* NB: Fourth byte "should be zero"; we are ignoring it. 
*/ 4227 GCPtrBase = pa8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);4228 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);4235 RTGCPHYS GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16); 4236 uint16_t cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8); 4229 4237 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); 4230 4238 4231 4239 /* IDTR contents are at offset 0x5A, 6 bytes. */ 4232 pa 8Mem= pbMem + 0x5A;4233 GCPtrBase = pa 8Mem[0] + (pa8Mem[1] << 8) + (pa8Mem[2] << 16);4234 cbLimit = pa8Mem[4] + (pa8Mem[5] << 8);4240 pau8Mem = pbMem + 0x5A; 4241 GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16); 4242 cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8); 4235 4243 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit); 4236 4244 … … 4242 4250 Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx)); 4243 4251 4244 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pbMem, IEM_ACCESS_SYS_R);4252 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 4245 4253 if (rcStrict != VINF_SUCCESS) 4246 4254 return rcStrict; … … 5731 5739 * assembly and such. 5732 5740 */ 5733 void *pvDesc; 5734 rcStrict = iemMemMap(pVCpu, &pvDesc, 8, UINT8_MAX, pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), 5735 IEM_ACCESS_DATA_RW, 0); 5741 uint8_t bUnmapInfo; 5742 void *pvDesc; 5743 rcStrict = iemMemMap(pVCpu, &pvDesc, &bUnmapInfo, 8, UINT8_MAX, 5744 pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW, 0); 5736 5745 if (rcStrict != VINF_SUCCESS) 5737 5746 return rcStrict; … … 5743 5752 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break; 5744 5753 } 5745 rcStrict = iemMemCommitAndUnmap(pVCpu, pvDesc, IEM_ACCESS_DATA_RW);5754 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 5746 5755 if (rcStrict != VINF_SUCCESS) 5747 5756 return rcStrict; … … 8161 8170 if (cbToMap > 512) 8162 8171 cbToMap = 512; 8172 uint8_t bUnmapInfo; 8163 8173 void *pvSrc = NULL; 8164 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, cbToMap, UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0); 8174 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, &bUnmapInfo, cbToMap, 8175 UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0); 8165 8176 if (rcStrict == VINF_SUCCESS) 8166 8177 { 8167 8178 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:"); 8168 rcStrict = iemMemCommitAndUnmap(pVCpu, pvSrc, IEM_ACCESS_DATA_R);8179 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 8169 8180 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 8170 8181 } … … 8654 8665 * Implements 'CMPXCHG16B' fallback using rendezvous. 8655 8666 */ 8656 IEM_CIMPL_DEF_ 4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,8657 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags )8667 IEM_CIMPL_DEF_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx, 8668 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo) 8658 8669 { 8659 8670 # ifdef IN_RING3 … … 8672 8683 { 8673 8684 /* Duplicated tail code. 
*/ 8674 rcStrict = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_RW);8685 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 8675 8686 if (rcStrict == VINF_SUCCESS) 8676 8687 { … … 8686 8697 return rcStrict; 8687 8698 # else 8688 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags );8699 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags, bUnmapInfo); 8689 8700 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */ 8690 8701 # endif … … 8818 8829 * Access the memory. 8819 8830 */ 8820 void *pvMem512; 8821 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 8831 uint8_t bUnmapInfo; 8832 void *pvMem512; 8833 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512, 8834 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 8822 8835 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8823 8836 if (rcStrict != VINF_SUCCESS) … … 8881 8894 * Commit the memory. 8882 8895 */ 8883 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);8896 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 8884 8897 if (rcStrict != VINF_SUCCESS) 8885 8898 return rcStrict; … … 8911 8924 * Access the memory. 8912 8925 */ 8913 void *pvMem512; 8914 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 8926 uint8_t bUnmapInfo; 8927 void *pvMem512; 8928 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 8915 8929 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 8916 8930 if (rcStrict != VINF_SUCCESS) … … 8998 9012 * Unmap the memory. 8999 9013 */ 9000 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);9014 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 9001 9015 if (rcStrict != VINF_SUCCESS) 9002 9016 return rcStrict; … … 9051 9065 */ 9052 9066 /* The x87+SSE state. */ 9053 void *pvMem512; 9054 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 9067 uint8_t bUnmapInfoMem512; 9068 void *pvMem512; 9069 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512, 9070 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 9055 9071 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 9056 9072 if (rcStrict != VINF_SUCCESS) … … 9060 9076 9061 9077 /* The header. */ 9078 uint8_t bUnmapInfoHdr; 9062 9079 PX86XSAVEHDR pHdr; 9063 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */); 9080 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, &bUnmapInfoHdr, sizeof(pHdr), 9081 iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */); 9064 9082 if (rcStrict != VINF_SUCCESS) 9065 9083 return rcStrict; … … 9119 9137 9120 9138 /* Commit the x87 state bits. (probably wrong) */ 9121 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);9139 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512); 9122 9140 if (rcStrict != VINF_SUCCESS) 9123 9141 return rcStrict; … … 9130 9148 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. 
*/ 9131 9149 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9); 9150 uint8_t bUnmapInfoComp; 9132 9151 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI); 9133 9152 PX86XSAVEYMMHI pCompDst; 9134 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 9153 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, &bUnmapInfoComp, sizeof(*pCompDst), iEffSeg, 9154 GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 9135 9155 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */); 9136 9156 if (rcStrict != VINF_SUCCESS) … … 9141 9161 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i]; 9142 9162 9143 rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);9163 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp); 9144 9164 if (rcStrict != VINF_SUCCESS) 9145 9165 return rcStrict; … … 9152 9172 | (fReqComponents & fXInUse); 9153 9173 9154 rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);9174 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr); 9155 9175 if (rcStrict != VINF_SUCCESS) 9156 9176 return rcStrict; … … 9207 9227 */ 9208 9228 /* The x87+SSE state. */ 9209 void *pvMem512; 9210 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 9229 uint8_t bUnmapInfoMem512; 9230 void *pvMem512; 9231 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R, 9211 9232 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC); 9212 9233 if (rcStrict != VINF_SUCCESS) … … 9218 9239 * Calc the requested mask 9219 9240 */ 9241 uint8_t bUnmapInfoHdr; 9220 9242 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr; 9221 9243 PCX86XSAVEHDR pHdrSrc; 9222 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512,9244 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, &bUnmapInfoHdr, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, 9223 9245 IEM_ACCESS_DATA_R, 0 /* checked above */); 9224 9246 if (rcStrict != VINF_SUCCESS) … … 9236 9258 9237 9259 /* We won't need this any longer. */ 9238 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);9260 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr); 9239 9261 if (rcStrict != VINF_SUCCESS) 9240 9262 return rcStrict; … … 9335 9357 9336 9358 /* Unmap the x87 state bits (so we've don't run out of mapping). */ 9337 rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);9359 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512); 9338 9360 if (rcStrict != VINF_SUCCESS) 9339 9361 return rcStrict; … … 9350 9372 { 9351 9373 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. 
*/ 9374 uint8_t bUnmapInfoComp; 9352 9375 PCX86XSAVEYMMHI pCompSrc; 9353 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),9376 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, &bUnmapInfoComp, sizeof(*pCompDst), 9354 9377 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT], 9355 9378 IEM_ACCESS_DATA_R, 0 /* checked above */); … … 9363 9386 } 9364 9387 9365 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);9388 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp); 9366 9389 if (rcStrict != VINF_SUCCESS) 9367 9390 return rcStrict; … … 9652 9675 IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst) 9653 9676 { 9677 uint8_t bUnmapInfo; 9654 9678 RTPTRUNION uPtr; 9655 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,9679 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 9656 9680 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 9657 9681 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */); … … 9661 9685 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr); 9662 9686 9663 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);9687 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 9664 9688 if (rcStrict != VINF_SUCCESS) 9665 9689 return rcStrict; … … 9696 9720 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87); 9697 9721 9722 uint8_t bUnmapInfo; 9698 9723 RTPTRUNION uPtr; 9699 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,9724 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108, 9700 9725 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */); 9701 9726 if (rcStrict != VINF_SUCCESS) … … 9712 9737 } 9713 9738 9714 rcStrict = iemMemCommitAndUnmap(pVCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);9739 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 9715 9740 if (rcStrict != VINF_SUCCESS) 9716 9741 return rcStrict; … … 9748 9773 IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 9749 9774 { 9775 uint8_t bUnmapInfo; 9750 9776 RTCPTRUNION uPtr; 9751 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,9777 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28, 9752 9778 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 9753 9779 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/); … … 9757 9783 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr); 9758 9784 9759 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);9785 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 9760 9786 if (rcStrict != VINF_SUCCESS) 9761 9787 return rcStrict; … … 9775 9801 IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc) 9776 9802 { 9803 uint8_t bUnmapInfo; 9777 9804 RTCPTRUNION uPtr; 9778 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,9805 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 
94 : 108, 9779 9806 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ ); 9780 9807 if (rcStrict != VINF_SUCCESS) … … 9792 9819 } 9793 9820 9794 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);9821 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 9795 9822 if (rcStrict != VINF_SUCCESS) 9796 9823 return rcStrict; -
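The IEMAllCImpl.cpp hunks apply the same cookie convention to the special stack helpers used by call gates, retf, iret and friends. Pieced together from the iemCImpl_retf hunk, the 16-bit far-return read now has this shape (a sketch only; validation of the popped CS and the final register commit are elided):

uint8_t      bUnmapInfo;
RTCPTRUNION  uPtrFrame;
RTUINT64U    NewRsp;
VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 + 2 /*cbRetPtr*/, 1 /*cbAlign*/,
                                                   &uPtrFrame.pv, &bUnmapInfo, &NewRsp.u);
if (rcStrict == VINF_SUCCESS)
{
    uint16_t const uNewIp = uPtrFrame.pu16[0];
    uint16_t const uNewCs = uPtrFrame.pu16[1];
    /* Old form: iemMemStackPopDoneSpecial(pVCpu, uPtrFrame.pv); */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
    /* ... check/load uNewCs, then commit RIP=uNewIp and RSP=NewRsp.u ... */
}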
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r100804 r102430 1212 1212 #endif 1213 1213 1214 uint8_t bUnmapInfo; 1214 1215 OP_TYPE *puMem; 1215 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,1216 rcStrict = iemMemMap(pVCpu, (void **)&puMem, &bUnmapInfo, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI, 1216 1217 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1); 1217 1218 if (rcStrict != VINF_SUCCESS) … … 1227 1228 *puMem = (OP_TYPE)u32Value; 1228 1229 # ifdef IN_RING3 1229 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);1230 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1230 1231 # else 1231 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);1232 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo); 1232 1233 # endif 1233 1234 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS)) … … 1417 1418 do 1418 1419 { 1420 uint8_t bUnmapInfo; 1419 1421 OP_TYPE *puMem; 1420 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,1422 rcStrict = iemMemMap(pVCpu, (void **)&puMem, &bUnmapInfo, OP_SIZE / 8, X86_SREG_ES, uAddrReg, 1421 1423 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1); 1422 1424 if (rcStrict != VINF_SUCCESS) … … 1433 1435 *puMem = (OP_TYPE)u32Value; 1434 1436 # ifdef IN_RING3 1435 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);1437 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 1436 1438 # else 1437 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);1439 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo); 1438 1440 # endif 1439 1441 if (rcStrict2 == VINF_SUCCESS) -
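In the string-instruction template the only context-dependent part left is the choice of commit function; both flavours now take the identical cookie argument, so the ring-3 #ifdef selects nothing but the function name. The hunk above condensed into one contiguous snippet (OP_TYPE, OP_SIZE and ADDR_rDI are the template macros of this file):

uint8_t  bUnmapInfo;
OP_TYPE *puMem;
rcStrict = iemMemMap(pVCpu, (void **)&puMem, &bUnmapInfo, OP_SIZE / 8, X86_SREG_ES,
                     pVCpu->cpum.GstCtx.ADDR_rDI, IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
if (rcStrict == VINF_SUCCESS)
{
    *puMem = (OP_TYPE)u32Value;
# ifdef IN_RING3
    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
# else
    rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
# endif
}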
trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h
r102428 r102430 12481 12481 * the patterns IEMAllThrdPython.py requires for the code morphing. 12482 12482 */ 12483 #define BODY_CMPXCHG16B_HEAD \12484 IEM_MC_BEGIN( 4, 4, IEM_MC_F_64BIT, 0); \12483 #define BODY_CMPXCHG16B_HEAD(bUnmapInfoStmt) \ 12484 IEM_MC_BEGIN(5, 4, IEM_MC_F_64BIT, 0); \ 12485 12485 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 12486 12486 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ … … 12488 12488 \ 12489 12489 IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16); \ 12490 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \12490 bUnmapInfoStmt; \ 12491 12491 IEM_MC_ARG(PRTUINT128U, pu128MemDst, 0); \ 12492 12492 IEM_MC_MEM_MAP_U128_RW(pu128MemDst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ … … 12518 12518 && (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 12519 12519 { 12520 BODY_CMPXCHG16B_HEAD ;12520 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12521 12521 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12522 12522 BODY_CMPXCHG16B_TAIL; … … 12524 12524 else 12525 12525 { 12526 BODY_CMPXCHG16B_HEAD ;12526 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12527 12527 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12528 12528 BODY_CMPXCHG16B_TAIL; … … 12533 12533 if (pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12534 12534 { 12535 BODY_CMPXCHG16B_HEAD ;12535 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12536 12536 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12537 12537 BODY_CMPXCHG16B_TAIL; … … 12539 12539 else 12540 12540 { 12541 BODY_CMPXCHG16B_HEAD ;12542 IEM_MC_CALL_CIMPL_ 4(IEM_CIMPL_F_STATUS_FLAGS,12541 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4)); 12542 IEM_MC_CALL_CIMPL_5(IEM_CIMPL_F_STATUS_FLAGS, 12543 12543 RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX) 12544 12544 | RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xDX), 12545 iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12545 iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, 12546 pEFlags, bUnmapInfo); 12546 12547 IEM_MC_END(); 12547 12548 } … … 12552 12553 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 12553 12554 { 12554 BODY_CMPXCHG16B_HEAD ;12555 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12555 12556 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12556 12557 BODY_CMPXCHG16B_TAIL; … … 12558 12559 else 12559 12560 { 12560 BODY_CMPXCHG16B_HEAD ;12561 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12561 12562 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12562 12563 BODY_CMPXCHG16B_TAIL; … … 12570 12571 if (pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12571 12572 { 12572 BODY_CMPXCHG16B_HEAD ;12573 BODY_CMPXCHG16B_HEAD(IEM_MC_LOCAL(uint8_t, bUnmapInfo)); 12573 12574 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12574 12575 BODY_CMPXCHG16B_TAIL; … … 12576 12577 else 12577 12578 { 12578 BODY_CMPXCHG16B_HEAD ;12579 BODY_CMPXCHG16B_HEAD(IEM_MC_ARG(uint8_t, bUnmapInfo, 4)); 12579 12580 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, 12580 12581 RT_BIT_64(kIemNativeGstReg_GprFirst + X86_GREG_xAX) -
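The BODY_CMPXCHG16B_HEAD change is a macro-parameterization trick: the head now receives the declaration of bUnmapInfo as an argument, so the same body can declare it either as IEM_MC_LOCAL (for the direct assembly helpers) or as IEM_MC_ARG slot 4 (so it can be forwarded to the widened iemCImpl_cmpxchg16b_fallback_rendezvous). A self-contained plain-C sketch of the same technique; all names here (EXAMPLE_HEAD, consumeCookie, ...) are invented for illustration:

#include <stdio.h>

#define EXAMPLE_HEAD(a_DeclCookieStmt) \
    { \
        a_DeclCookieStmt; \
        cookie = 42; /* stands in for the mapping step that produces the unmap cookie */

#define EXAMPLE_TAIL(a_UseCookieStmt) \
        a_UseCookieStmt; \
    }

static void consumeCookie(unsigned cookie) /* stands in for the 5-argument C-impl worker */
{
    printf("forwarded cookie %u\n", cookie);
}

int main(void)
{
    EXAMPLE_HEAD(unsigned cookie)                         /* caller decides how 'cookie' is declared */
    EXAMPLE_TAIL(printf("throw-away local %u\n", cookie))

    EXAMPLE_HEAD(unsigned cookie)                         /* here it is handed on to a helper instead */
    EXAMPLE_TAIL(consumeCookie(cookie))
    return 0;
}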
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
r102427 r102430 55 55 { 56 56 /* The lazy approach for now... */ 57 uint8_t bUnmapInfo; 57 58 TMPL_MEM_TYPE const *puSrc; 58 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(*puSrc), iSegReg, GCPtrMem,59 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem, 59 60 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN); 60 61 if (rc == VINF_SUCCESS) 61 62 { 62 63 *puDst = *puSrc; 63 rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);64 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 64 65 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst)); 65 66 } … … 79 80 pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; 80 81 # endif 81 TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*pSrc), iSegReg, GCPtrMem, 82 uint8_t bUnmapInfo; 83 TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem, 82 84 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN); 83 85 *pDst = *pSrc; 84 iemMemCommitAndUnmapJmp(pVCpu, (void *)pSrc, IEM_ACCESS_DATA_R);86 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 85 87 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst)); 86 88 } … … 92 94 pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; 93 95 # endif 94 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*puSrc), iSegReg, GCPtrMem, 96 uint8_t bUnmapInfo; 97 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem, 95 98 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN); 96 99 TMPL_MEM_TYPE const uRet = *puSrc; 97 iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);100 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 98 101 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet)); 99 102 return uRet; … … 118 121 { 119 122 /* The lazy approach for now... 
*/ 123 uint8_t bUnmapInfo; 120 124 TMPL_MEM_TYPE *puDst; 121 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(*puDst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN); 125 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst), 126 iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN); 122 127 if (rc == VINF_SUCCESS) 123 128 { … … 127 132 *puDst = uValue; 128 133 #endif 129 rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_DATA_W);134 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 130 135 #ifdef TMPL_MEM_BY_REF 131 136 Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue)); … … 163 168 Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue)); 164 169 #endif 165 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(*puDst), iSegReg, GCPtrMem, 170 uint8_t bUnmapInfo; 171 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem, 166 172 IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN); 167 173 #ifdef TMPL_MEM_BY_REF … … 170 176 *puDst = uValue; 171 177 #endif 172 iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_DATA_W);178 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 173 179 } 174 180 #endif /* IEM_WITH_SETJMP */ … … 196 202 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem)); 197 203 *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */ 198 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN); 204 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, 205 IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN); 199 206 } 200 207 … … 219 226 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem)); 220 227 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */ 221 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN); 228 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, 229 IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN); 222 230 } 223 231 … … 242 250 Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem)); 243 251 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */ 244 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN); 252 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, 253 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN); 245 254 } 246 255 … … 265 274 266 275 /* Write the dword the lazy way. */ 276 uint8_t bUnmapInfo; 267 277 TMPL_MEM_TYPE *puDst; 268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,269 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);278 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 279 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN); 270 280 if (rc == VINF_SUCCESS) 271 281 { 272 282 *puDst = uValue; 273 rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);283 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 274 284 275 285 /* Commit the new RSP value unless we an access handler made trouble. */ … … 302 312 303 313 /* Write the word the lazy way. 
*/ 314 uint8_t bUnmapInfo; 304 315 TMPL_MEM_TYPE const *puSrc; 305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,316 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 306 317 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN); 307 318 if (rc == VINF_SUCCESS) 308 319 { 309 320 *puValue = *puSrc; 310 rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);321 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 311 322 312 323 /* Commit the new RSP value. */ … … 339 350 340 351 /* Write the word the lazy way. */ 352 uint8_t bUnmapInfo; 341 353 TMPL_MEM_TYPE *puDst; 342 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,343 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);354 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 355 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN); 344 356 if (rc == VINF_SUCCESS) 345 357 { 346 358 *puDst = uValue; 347 rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);359 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 348 360 349 361 /* Commit the new RSP value unless we an access handler made trouble. */ … … 377 389 378 390 /* Write the word the lazy way. */ 391 uint8_t bUnmapInfo; 379 392 TMPL_MEM_TYPE const *puSrc; 380 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,393 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 381 394 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN); 382 395 if (rc == VINF_SUCCESS) 383 396 { 384 397 *puValue = *puSrc; 385 rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);398 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); 386 399 387 400 /* Commit the new RSP value. */ … … 414 427 415 428 /* Write the data. */ 416 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 429 uint8_t bUnmapInfo; 430 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 417 431 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN); 418 432 *puDst = uValue; 419 iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);433 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 420 434 421 435 /* Commit the RSP change. */ … … 440 454 441 455 /* Read the data. */ 442 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop, 443 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN); 456 uint8_t bUnmapInfo; 457 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, 458 GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN); 444 459 TMPL_MEM_TYPE const uRet = *puSrc; 445 iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);460 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 446 461 447 462 /* Commit the RSP change and return the popped value. */ … … 478 493 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check 479 494 * ancient hardware when it actually did change. */ 480 uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(uint16_t), X86_SREG_SS, GCPtrTop, 495 uint8_t bUnmapInfo; 496 uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop, 481 497 IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? 
*/ 482 498 *puDst = (uint16_t)uValue; 483 iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);499 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo); 484 500 485 501 /* Commit the RSP change. */ -
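For the read/write templates the substitution is mechanical, but one concrete instantiation may help. Roughly what the longjmp-style safe fetch expands to for TMPL_MEM_TYPE = uint32_t (the alignment mask of 3 is an assumption, and the Example suffix is made up because the real instantiated name is generated by the template):

static uint32_t iemMemFetchDataU32SafeJmpExample(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint8_t         bUnmapInfo;
    uint32_t const *puSrc = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                           IEM_ACCESS_DATA_R, 3 /*assumed alignment mask*/);
    uint32_t const  uRet  = *puSrc;
    /* Old form: iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R); */
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    return uRet;
}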
trunk/src/VBox/VMM/include/IEMInline.h
r102428 r102430 3715 3715 #ifdef IEM_WITH_SETJMP 3716 3716 3717 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, void *pvMem,uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP3717 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 3718 3718 { 3719 3719 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) … … 3721 3721 return; 3722 3722 # endif 3723 iemMemCommitAndUnmapRwSafeJmp(pVCpu, pvMem,bMapInfo);3724 } 3725 3726 3727 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, void *pvMem,uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP3723 iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo); 3724 } 3725 3726 3727 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 3728 3728 { 3729 3729 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) … … 3731 3731 return; 3732 3732 # endif 3733 iemMemCommitAndUnmapWoSafeJmp(pVCpu, pvMem,bMapInfo);3734 } 3735 3736 3737 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, const void *pvMem,uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP3733 iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo); 3734 } 3735 3736 3737 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP 3738 3738 { 3739 3739 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) … … 3741 3741 return; 3742 3742 # endif 3743 iemMemCommitAndUnmapRoSafeJmp(pVCpu, pvMem,bMapInfo);3744 } 3745 3746 DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, const void *pvMem,uint8_t bMapInfo) RT_NOEXCEPT3743 iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo); 3744 } 3745 3746 DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT 3747 3747 { 3748 3748 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) … … 3750 3750 return; 3751 3751 # endif 3752 iemMemRollbackAndUnmapWoSafe(pVCpu, pvMem,bMapInfo);3752 iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo); 3753 3753 } 3754 3754 -
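The IEMInline.h wrappers retain their ring-3 data-TLB shortcut but no longer need the mapped pointer, so a caller only threads the cookie through. A small usage sketch combining the safe 128-bit read-only map helper with the slimmed-down inline unmap wrapper (the pairing is chosen purely for illustration):

uint8_t      bUnmapInfo;
PCRTUINT128U pu128Src = iemMemMapDataU128RoSafeJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem);
RTUINT128U   uValue   = *pu128Src;              /* ... use uValue ... */
/* Old form: iemMemCommitAndUnmapRoJmp(pVCpu, pu128Src, bUnmapInfo); */
iemMemCommitAndUnmapRoJmp(pVCpu, bUnmapInfo);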
trunk/src/VBox/VMM/include/IEMInternal.h
r102428 r102430 5039 5039 #define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18) 5040 5040 5041 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,5041 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, 5042 5042 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT; 5043 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;5043 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 5044 5044 #ifndef IN_RING3 5045 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;5046 #endif 5047 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;5045 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 5046 #endif 5047 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 5048 5048 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT; 5049 5049 VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT; … … 5181 5181 PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP; 5182 5182 5183 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5184 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5185 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5186 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) RT_NOEXCEPT; 5183 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5184 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5185 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5186 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP; 5187 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 5187 5188 #endif 5188 5189 5189 5190 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 5190 void **ppvMem, uint 64_t *puNewRsp) RT_NOEXCEPT;5191 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;5191 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT; 5192 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT; 5192 5193 VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT; 5193 5194 VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT; … … 5198 5199 VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT; 5199 5200 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign, 5200 void const **ppvMem, uint 64_t *puNewRsp) RT_NOEXCEPT;5201 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT; 5201 5202 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem, 5202 void const **ppvMem, uint 64_t uCurNewRsp) RT_NOEXCEPT;5203 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) 
RT_NOEXCEPT;5203 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT; 5204 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT; 5204 5205 VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT; 5205 5206 VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT; … … 5334 5335 IEM_CIMPL_PROTO_0(iemCImpl_xgetbv); 5335 5336 IEM_CIMPL_PROTO_0(iemCImpl_xsetbv); 5336 IEM_CIMPL_PROTO_ 4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,5337 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags );5337 IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx, 5338 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo); 5338 5339 IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff); 5339 5340 IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts); -
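For orientation only (this is not code from changeset r102430): a minimal sketch of the calling pattern implied by the updated prototypes above, assuming a flat address access; pu16Dst, GCPtrMem and u16Value are placeholder names.

    uint8_t      bUnmapInfo;
    uint16_t    *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, &bUnmapInfo, sizeof(*pu16Dst),
                                      UINT8_MAX /* flat, no segment */, GCPtrMem,
                                      IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1 /* alignment mask */);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu16Dst = u16Value;                            /* operate on the mapped guest memory */
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* the opaque byte replaces the old pvMem + fAccess pair */

The gist of the change is that iemMemMap now hands back an opaque bUnmapInfo byte, and that byte is the only token the commit, postpone-to-R3 and rollback calls need.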
trunk/src/VBox/VMM/include/IEMMc.h
r102429 r102430 1519 1519 */ 1520 1520 #ifndef IEM_WITH_SETJMP 1521 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1522 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \ 1523 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \ 1524 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1525 } while (0) 1521 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1522 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1523 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)) 1526 1524 #else 1527 1525 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1540 1538 */ 1541 1539 #ifndef IEM_WITH_SETJMP 1542 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1543 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \ 1544 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \ 1545 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1546 } while (0) 1540 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1541 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1542 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)) 1547 1543 #else 1548 1544 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1561 1557 */ 1562 1558 #ifndef IEM_WITH_SETJMP 1563 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1564 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), (a_iSeg), \ 1565 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \ 1566 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1567 } while (0) 1559 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1560 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \ 1561 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)) 1568 1562 #else 1569 1563 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1582 1576 */ 1583 1577 #ifndef IEM_WITH_SETJMP 1584 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1585 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \ 1586 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)); \ 1587 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1588 } while (0) 1578 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1579 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1580 (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0)) 1589 1581 #else 1590 1582 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1603 1595 */ 1604 1596 #ifndef IEM_WITH_SETJMP 1605 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1606 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \ 1607 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)); \ 1608 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1609 } while (0) 1597 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1598 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1599 (a_GCPtrMem), IEM_ACCESS_DATA_W, 0)) 1610 1600 #else 1611 1601 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1624 1614 */ 1625 1615 #ifndef 
IEM_WITH_SETJMP 1626 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1627 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), sizeof(uint8_t), UINT8_MAX, \ 1628 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)); \ 1629 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1630 } while (0) 1616 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ 1617 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \ 1618 (a_GCPtrMem), IEM_ACCESS_DATA_R, 0)) 1631 1619 #else 1632 1620 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1648 1636 */ 1649 1637 #ifndef IEM_WITH_SETJMP 1650 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1651 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \ 1652 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \ 1653 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1654 } while (0) 1638 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1639 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1640 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)) 1655 1641 #else 1656 1642 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1669 1655 */ 1670 1656 #ifndef IEM_WITH_SETJMP 1671 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1672 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \ 1673 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \ 1674 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1675 } while (0) 1657 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1658 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1659 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)) 1676 1660 #else 1677 1661 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1690 1674 */ 1691 1675 #ifndef IEM_WITH_SETJMP 1692 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1693 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), (a_iSeg), \ 1694 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \ 1695 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1696 } while (0) 1676 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1677 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \ 1678 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)) 1697 1679 #else 1698 1680 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1711 1693 */ 1712 1694 #ifndef IEM_WITH_SETJMP 1713 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1714 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \ 1715 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1)); \ 1716 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1717 } while (0) 1695 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1696 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1697 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 
1)) 1718 1698 #else 1719 1699 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1732 1712 */ 1733 1713 #ifndef IEM_WITH_SETJMP 1734 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1735 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \ 1736 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)); \ 1737 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1738 } while (0) 1714 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1715 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1716 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1)) 1739 1717 #else 1740 1718 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1753 1731 */ 1754 1732 #ifndef IEM_WITH_SETJMP 1755 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1756 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), sizeof(uint16_t), UINT8_MAX, \ 1757 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)); \ 1758 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1759 } while (0) 1733 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ 1734 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \ 1735 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1)) 1760 1736 #else 1761 1737 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1795 1771 */ 1796 1772 #ifndef IEM_WITH_SETJMP 1797 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1798 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \ 1799 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \ 1800 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1801 } while (0) 1773 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1774 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 1775 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)) 1802 1776 #else 1803 1777 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1816 1790 */ 1817 1791 #ifndef IEM_WITH_SETJMP 1818 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1819 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \ 1820 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \ 1821 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1822 } while (0) 1792 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1793 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 1794 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)) 1823 1795 #else 1824 1796 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1837 1809 */ 1838 1810 #ifndef IEM_WITH_SETJMP 1839 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1840 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), (a_iSeg), \ 1841 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \ 1842 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1843 } while (0) 1811 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1812 
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \ 1813 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)) 1844 1814 #else 1845 1815 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1858 1828 */ 1859 1829 #ifndef IEM_WITH_SETJMP 1860 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1861 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \ 1862 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)); \ 1863 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1864 } while (0) 1830 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1831 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 1832 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1)) 1865 1833 #else 1866 1834 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1879 1847 */ 1880 1848 #ifndef IEM_WITH_SETJMP 1881 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1882 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \ 1883 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)); \ 1884 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1885 } while (0) 1849 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1850 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 1851 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1)) 1886 1852 #else 1887 1853 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1900 1866 */ 1901 1867 #ifndef IEM_WITH_SETJMP 1902 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 1903 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), sizeof(uint32_t), UINT8_MAX, \ 1904 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)); \ 1905 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 1906 } while (0) 1868 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ 1869 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \ 1870 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1)) 1907 1871 #else 1908 1872 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 1960 1924 */ 1961 1925 #ifndef IEM_WITH_SETJMP 1962 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1963 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \ 1964 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \ 1965 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 1966 } while (0) 1926 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1927 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 1928 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)) 1967 1929 #else 1968 1930 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 1981 1943 */ 1982 1944 #ifndef IEM_WITH_SETJMP 1983 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 1984 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \ 1985 (a_GCPtrMem), IEM_ACCESS_DATA_W, 
sizeof(uint64_t) - 1)); \ 1986 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 1987 } while (0) 1945 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1946 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 1947 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 1988 1948 #else 1989 1949 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2002 1962 */ 2003 1963 #ifndef IEM_WITH_SETJMP 2004 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2005 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), (a_iSeg), \ 2006 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \ 2007 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 2008 } while (0) 1964 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 1965 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \ 1966 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)) 2009 1967 #else 2010 1968 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2023 1981 */ 2024 1982 #ifndef IEM_WITH_SETJMP 2025 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2026 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \ 2027 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)); \ 2028 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 2029 } while (0) 1983 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 1984 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 1985 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1)) 2030 1986 #else 2031 1987 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2044 2000 */ 2045 2001 #ifndef IEM_WITH_SETJMP 2046 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2047 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \ 2048 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2049 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2050 } while (0) 2002 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2003 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2004 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2051 2005 #else 2052 2006 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2065 2019 */ 2066 2020 #ifndef IEM_WITH_SETJMP 2067 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2068 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), sizeof(uint64_t), UINT8_MAX, \ 2069 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)); \ 2070 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 2071 } while (0) 2021 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ 2022 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \ 2023 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1)) 2072 2024 #else 2073 2025 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2125 2077 */ 2126 2078 #ifndef IEM_WITH_SETJMP 2127 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, 
a_GCPtrMem) do { \ 2128 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128U), (a_iSeg), \ 2129 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1)); \ 2130 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 2131 } while (0) 2079 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2080 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \ 2081 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1)) 2132 2082 #else 2133 2083 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2146 2096 */ 2147 2097 #ifndef IEM_WITH_SETJMP 2148 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2149 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), (a_iSeg), \ 2150 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)); \ 2151 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2152 } while (0) 2098 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2099 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \ 2100 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)) 2153 2101 #else 2154 2102 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2167 2115 */ 2168 2116 #ifndef IEM_WITH_SETJMP 2169 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2170 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), (a_iSeg), \ 2171 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)); \ 2172 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 2173 } while (0) 2117 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2118 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \ 2119 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)) 2174 2120 #else 2175 2121 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2188 2134 */ 2189 2135 #ifndef IEM_WITH_SETJMP 2190 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2191 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \ 2192 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1)); \ 2193 a_bUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); \ 2194 } while (0) 2136 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2137 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2138 (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1)) 2195 2139 #else 2196 2140 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2209 2153 */ 2210 2154 #ifndef IEM_WITH_SETJMP 2211 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2212 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \ 2213 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)); \ 2214 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2215 } while (0) 2155 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2156 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2157 (a_GCPtrMem), 
IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1)) 2216 2158 #else 2217 2159 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2230 2172 */ 2231 2173 #ifndef IEM_WITH_SETJMP 2232 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2233 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), sizeof(RTUINT128), UINT8_MAX, \ 2234 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)); \ 2235 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); \ 2236 } while (0) 2174 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ 2175 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \ 2176 (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1)) 2237 2177 #else 2238 2178 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2254 2194 */ 2255 2195 #ifndef IEM_WITH_SETJMP 2256 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2257 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), (a_iSeg), \ 2258 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2259 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2260 } while (0) 2196 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2197 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \ 2198 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2261 2199 #else 2262 2200 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2274 2212 */ 2275 2213 #ifndef IEM_WITH_SETJMP 2276 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2277 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \ 2278 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2279 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2280 } while (0) 2214 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ 2215 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \ 2216 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2281 2217 #else 2282 2218 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2296 2232 */ 2297 2233 #ifndef IEM_WITH_SETJMP 2298 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) do { \ 2299 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), sizeof(RTFLOAT80U), (a_iSeg), \ 2300 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2301 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2302 } while (0) 2234 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ 2235 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \ 2236 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2303 2237 #else 2304 2238 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \ … … 2316 2250 */ 2317 2251 #ifndef IEM_WITH_SETJMP 2318 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) do { \ 2319 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), sizeof(RTFLOAT80U), UINT8_MAX, \ 2320 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)); \ 2321 a_bUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); \ 2322 } while (0) 2252 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, 
a_bUnmapInfo, a_GCPtrMem) \ 2253 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \ 2254 (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1)) 2323 2255 #else 2324 2256 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \ … … 2334 2266 */ 2335 2267 #ifndef IEM_WITH_SETJMP 2336 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) do { \ 2337 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); \ 2338 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_RW)); \ 2339 } while (0) 2340 #else 2341 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) \ 2342 iemMemCommitAndUnmapRwJmp(pVCpu, (a_pvMem), (a_bMapInfo)) 2268 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2269 #else 2270 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_pvMem, a_bMapInfo) iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo)) 2343 2271 #endif 2344 2272 … … 2347 2275 */ 2348 2276 #ifndef IEM_WITH_SETJMP 2349 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \ 2350 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \ 2351 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \ 2352 } while (0) 2353 #else 2354 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \ 2355 iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), (a_bMapInfo)) 2277 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2278 #else 2279 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_pvMem, a_bMapInfo) iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo)) 2356 2280 #endif 2357 2281 … … 2360 2284 */ 2361 2285 #ifndef IEM_WITH_SETJMP 2362 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) do { \ 2363 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); \ 2364 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (void *)(a_pvMem), IEM_ACCESS_DATA_R)); \ 2365 } while (0) 2366 #else 2367 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) \ 2368 iemMemCommitAndUnmapRoJmp(pVCpu, (a_pvMem), (a_bMapInfo)) 2286 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)) 2287 #else 2288 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_pvMem, a_bMapInfo) iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo)) 2369 2289 #endif 2370 2290 … … 2378 2298 * 2379 2299 * @remarks May in theory return - for now. 2380 * 2381 * @deprecated 2382 */ 2383 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \ 2384 do { \ 2385 if ( !(a_u16FSW & X86_FSW_ES) \ 2386 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2387 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2388 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \ 2389 } while (0) 2390 2391 2392 /** Commits the memory and unmaps the guest memory unless the FPU status word 2393 * indicates (@a a_u16FSW) and FPU control word indicates a pending exception 2394 * that would cause FLD not to store. 2395 * 2396 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a 2397 * store, while \#P will not. 2398 * 2399 * @remarks May in theory return - for now. 
2400 */ 2401 #ifndef IEM_WITH_SETJMP 2402 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \ 2403 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \ 2404 if ( !(a_u16FSW & X86_FSW_ES) \ 2405 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2406 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2407 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W)); \ 2408 else \ 2409 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \ 2410 } while (0) 2411 #else 2300 */ 2301 #ifndef IEM_WITH_SETJMP 2412 2302 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \ 2413 2303 if ( !(a_u16FSW & X86_FSW_ES) \ 2414 2304 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2415 2305 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2416 iemMemCommitAndUnmapWoJmp(pVCpu, (a_pvMem), a_bMapInfo); \2306 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \ 2417 2307 else \ 2418 iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo); \ 2308 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \ 2309 } while (0) 2310 #else 2311 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_pvMem, a_bMapInfo, a_u16FSW) do { \ 2312 if ( !(a_u16FSW & X86_FSW_ES) \ 2313 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \ 2314 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \ 2315 iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \ 2316 else \ 2317 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \ 2419 2318 } while (0) 2420 2319 #endif … … 2422 2321 /** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory. */ 2423 2322 #ifndef IEM_WITH_SETJMP 2424 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) do { \2425 RT_NOREF_PV(a_bMapInfo); Assert(a_bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); \2426 iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \2427 } while (0)2428 #else2429 2323 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \ 2430 iemMemRollbackAndUnmapWo(pVCpu, (a_pvMem), a_bMapInfo) 2324 iemMemRollbackAndUnmap(pVCpu, a_bMapInfo) 2325 #else 2326 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_pvMem, a_bMapInfo) \ 2327 iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo) 2431 2328 #endif 2432 2329
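Aside (illustration only, not part of the changeset): the non-setjmp macro bodies removed above computed the unmap token by hand as 1 | (IEM_ACCESS_TYPE_xxx << 4); after this change iemMemMap fills the byte in via pbUnmapInfo and the commit/rollback macros simply forward it. A hypothetical helper (made-up name) mirroring what the old bodies hard-coded:

    /* Illustration only: reproduces the encoding the removed macro bodies wrote by
       hand, with the access-type mask in the high nibble; the real value is now
       produced inside iemMemMap. */
    static uint8_t iemIllustrativeOldUnmapInfo(uint32_t fAccessTypeMask)
    {
        return (uint8_t)(1 | (fAccessTypeMask << 4));
    }

Note how a_pvMem remains a parameter of the IEM_MC_MEM_COMMIT_AND_UNMAP_* macros even though the rewritten bodies forward only a_bMapInfo.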