Changeset 108220 in vbox
- Timestamp:
- Feb 14, 2025 11:40:20 AM
- svn:sync-xref-src-repo-rev:
- 167540
- Location:
- trunk/src/VBox/VMM
- Files:
- 3 edited
- 3 copied
trunk/src/VBox/VMM/Makefile.kmk
r108204 → r108220 (added lines marked "+"; unchanged context lines have a leading space)

     VMMAll/HMVMXAll.cpp \
     VMMAll/IEMAll.cpp \
+    VMMAll/target-x86/IEMAllExec-x86.cpp \
+    VMMAll/target-x86/IEMAllXcpt-x86.cpp \
+    VMMAll/target-x86/IEMAllHlpFpu-x86.cpp \
     VMMAll/target-x86/IEMAllIntprTables1-x86.cpp \
     VMMAll/target-x86/IEMAllIntprTables2-x86.cpp \
 …
     VMMAll/HMVMXAll.cpp \
     VMMAll/IEMAll.cpp \
+    VMMAll/target-x86/IEMAllExec-x86.cpp \
+    VMMAll/target-x86/IEMAllXcpt-x86.cpp \
+    VMMAll/target-x86/IEMAllHlpFpu-x86.cpp \
     VMMAll/target-x86/IEMAllIntprTables1-x86.cpp \
     VMMAll/target-x86/IEMAllIntprTables2-x86.cpp \
 …
 # cl : Command line warning D9025 : overriding '/Oy-' with '/Oy'
 VMMAll/IEMAll.cpp_CXXFLAGS                          += -noover -O2xy
+VMMAll/target-x86/IEMAllExec-x86.cpp_CXXFLAGS       += -noover -O2xy
+VMMAll/target-x86/IEMAllXcpt-x86.cpp_CXXFLAGS       += -noover -O2xy
+VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS     += -noover -O2xy
 VMMAll/target-x86/IEMAllAImplC-x86.cpp_CXXFLAGS     += -noover -O2xy
 VMMAll/target-x86/IEMAllCImpl-x86.cpp_CXXFLAGS      += -noover -O2xy
 …
 # Omitting the frame pointer results in larger code, but it might be worth it. (esp addressing vs ebp?)
 VMMAll/IEMAll.cpp_CXXFLAGS                              += -O2 -fomit-frame-pointer
+VMMAll/target-x86/IEMAllExec-x86.cpp_CXXFLAGS           += -O2 -fomit-frame-pointer
+VMMAll/target-x86/IEMAllXcpt-x86.cpp_CXXFLAGS           += -O2 -fomit-frame-pointer
+VMMAll/target-x86/IEMAllHlpFpu-x86.cpp_CXXFLAGS         += -O2 -fomit-frame-pointer
 VMMAll/target-x86/IEMAllCImpl-x86.cpp_CXXFLAGS          += -O2 -fomit-frame-pointer
 VMMAll/target-x86/IEMAllCImplSvmInstr-x86.cpp_CXXFLAGS  += -O2 -fomit-frame-pointer
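The IEMAll.cpp hunks shown below are removals: the exception-delivery helpers (exception classification, recursive-exception evaluation, TSS stack loading, SS validation, and the real- and protected-mode exception raisers plus the task-switch code) are taken out of IEMAll.cpp, presumably into the new target-x86 source files added to the Makefile above (the changeset header lists three copied files). As a rough orientation only, the double-fault escalation rule implemented by iemGetXcptClass() and IEMEvaluateRecursiveXcpt() can be sketched in standalone C; every name in this sketch is invented for the illustration and none of it is VirtualBox API:

/* Simplified sketch of the recursive-exception rules from the removed block below.
 * #DE/#TS/#NP/#SS/#GP are "contributory", #PF forms its own class, and the
 * combination of the first (interrupted) and second (new) exception decides
 * whether the CPU delivers the new exception, escalates to #DF, or shuts down. */
#include <stdint.h>
#include <stdio.h>

typedef enum { XCPTCLASS_BENIGN, XCPTCLASS_CONTRIBUTORY, XCPTCLASS_PAGE_FAULT, XCPTCLASS_DOUBLE_FAULT } XCPTCLASS;

static XCPTCLASS xcptClass(uint8_t uVector)
{
    switch (uVector)
    {
        case 0x00: case 0x0a: case 0x0b: case 0x0c: case 0x0d:     /* #DE #TS #NP #SS #GP */
            return XCPTCLASS_CONTRIBUTORY;
        case 0x0e:                                                 /* #PF */
            return XCPTCLASS_PAGE_FAULT;
        case 0x08:                                                 /* #DF */
            return XCPTCLASS_DOUBLE_FAULT;
        default:
            return XCPTCLASS_BENIGN;
    }
}

/* Returns 0x08 (#DF) when the second exception escalates to a double fault,
 * -1 for a triple fault (shutdown), otherwise the second vector is delivered as-is. */
static int resolveRecursiveXcpt(uint8_t uPrevVector, uint8_t uCurVector)
{
    XCPTCLASS const enmPrev = xcptClass(uPrevVector);
    XCPTCLASS const enmCur  = xcptClass(uCurVector);
    if (   enmPrev == XCPTCLASS_DOUBLE_FAULT
        && (enmCur == XCPTCLASS_CONTRIBUTORY || enmCur == XCPTCLASS_PAGE_FAULT))
        return -1;                  /* Fault while delivering #DF -> triple fault. */
    if (   (enmPrev == XCPTCLASS_PAGE_FAULT   && (enmCur == XCPTCLASS_PAGE_FAULT || enmCur == XCPTCLASS_CONTRIBUTORY))
        || (enmPrev == XCPTCLASS_CONTRIBUTORY &&  enmCur == XCPTCLASS_CONTRIBUTORY))
        return 0x08;                /* Escalate to a double fault. */
    return uCurVector;              /* Otherwise the new exception is simply delivered. */
}

int main(void)
{
    printf("#GP during #PF delivery -> %#x\n", resolveRecursiveXcpt(0x0e, 0x0d));  /* 0x8: double fault   */
    printf("#TS during #GP delivery -> %#x\n", resolveRecursiveXcpt(0x0d, 0x0a));  /* 0x8: double fault   */
    printf("#PF during #GP delivery -> %#x\n", resolveRecursiveXcpt(0x0d, 0x0e));  /* 0xe: just raise #PF */
    return 0;
}

Compiled on its own, the three checks print 0x8, 0x8 and 0xe, matching the contributory/page-fault matrix in the removed IEMEvaluateRecursiveXcpt(), which additionally tracks NMI, external-interrupt and #AC-in-#AC cases via IEMXCPTRAISEINFO flags.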
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r108195 → r108220 (removed lines marked "-"; unchanged context lines have a leading space)

 size_t g_cbIemWrote;
 #endif
-
-
-/*********************************************************************************
-*   Internal Functions                                                           *
-*********************************************************************************/
-static VBOXSTRICTRC     iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
-                                                  uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
 
 
…
 
 
-/** @name   Misc Worker Functions.
- * @{
- */
-
-/**
- * Gets the exception class for the specified exception vector.
- *
- * @returns The class of the specified exception.
- * @param   uVector     The exception vector.
- */
-static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
-{
-    Assert(uVector <= X86_XCPT_LAST);
-    switch (uVector)
-    {
-        case X86_XCPT_DE:
-        case X86_XCPT_TS:
-        case X86_XCPT_NP:
-        case X86_XCPT_SS:
-        case X86_XCPT_GP:
-        case X86_XCPT_SX:   /* AMD only */
-            return IEMXCPTCLASS_CONTRIBUTORY;
-
-        case X86_XCPT_PF:
-        case X86_XCPT_VE:   /* Intel only */
-            return IEMXCPTCLASS_PAGE_FAULT;
-
-        case X86_XCPT_DF:
-            return IEMXCPTCLASS_DOUBLE_FAULT;
-    }
-    return IEMXCPTCLASS_BENIGN;
-}
-
-
-/**
- * Evaluates how to handle an exception caused during delivery of another event
- * (exception / interrupt).
- *
- * @returns How to handle the recursive exception.
- * @param   pVCpu               The cross context virtual CPU structure of the
- *                              calling thread.
- * @param   fPrevFlags          The flags of the previous event.
- * @param   uPrevVector         The vector of the previous event.
- * @param   fCurFlags           The flags of the current exception.
- * @param   uCurVector          The vector of the current exception.
- * @param   pfXcptRaiseInfo     Where to store additional information about the
- *                              exception condition. Optional.
- */
-VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
-                                                    uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
-{
-    /*
-     * Only CPU exceptions can be raised while delivering other events, software interrupt
-     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
-     */
-    AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
-    Assert(pVCpu); RT_NOREF(pVCpu);
-    Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
-
-    IEMXCPTRAISE     enmRaise   = IEMXCPTRAISE_CURRENT_XCPT;
-    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
-    if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
-    {
-        IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
-        if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
-        {
-            IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
-            if (   enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
-                && (   enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
-                    || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
-            {
-                enmRaise   = IEMXCPTRAISE_DOUBLE_FAULT;
-                fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
-                                                                        : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
-                Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
-                      uCurVector, pVCpu->cpum.GstCtx.cr2));
-            }
-            else if (   enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
-                     && enmCurXcptClass  == IEMXCPTCLASS_CONTRIBUTORY)
-            {
-                enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
-                Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
-            }
-            else if (   enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
-                     && (   enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
-                         || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
-            {
-                enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
-                Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
-            }
-        }
-        else
-        {
-            if (uPrevVector == X86_XCPT_NMI)
-            {
-                fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
-                if (uCurVector == X86_XCPT_PF)
-                {
-                    fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
-                    Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
-                }
-            }
-            else if (   uPrevVector == X86_XCPT_AC
-                     && uCurVector  == X86_XCPT_AC)
-            {
-                enmRaise   = IEMXCPTRAISE_CPU_HANG;
-                fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
-                Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
-            }
-        }
-    }
-    else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
-    {
-        fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
-        if (uCurVector == X86_XCPT_PF)
-            fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
-    }
-    else
-    {
-        Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
-        fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
-    }
-
-    if (pfXcptRaiseInfo)
-        *pfXcptRaiseInfo = fRaiseInfo;
-    return enmRaise;
-}
-
-
-/**
- * Enters the CPU shutdown state initiated by a triple fault or other
- * unrecoverable conditions.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure of the
- *                  calling thread.
- */
-static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
-{
-    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
-        IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
-
-    if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
-    {
-        Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-    }
-
-    RT_NOREF(pVCpu);
-    return VINF_EM_TRIPLE_FAULT;
-}
-
-
-/**
- * Validates a new SS segment.
- *
- * @returns VBox strict status code.
- * @param   pVCpu   The cross context virtual CPU structure of the
- *                  calling thread.
- * @param   NewSS   The new SS selctor.
- * @param   uCpl    The CPL to load the stack for.
- * @param   pDesc   Where to return the descriptor.
- */
-static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
-{
-    /* Null selectors are not allowed (we're not called for dispatching
-       interrupts with SS=0 in long mode). */
-    if (!(NewSS & X86_SEL_MASK_OFF_RPL))
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
-        return iemRaiseTaskSwitchFault0(pVCpu);
-    }
-
-    /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
-    if ((NewSS & X86_SEL_RPL) != uCpl)
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
-        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
-    }
-
-    /*
-     * Read the descriptor.
-     */
-    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
-    if (rcStrict != VINF_SUCCESS)
-        return rcStrict;
-
-    /*
-     * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
-     */
-    if (!pDesc->Legacy.Gen.u1DescType)
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
-        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
-    }
-
-    if (   (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-        || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
-        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
-    }
-    if (pDesc->Legacy.Gen.u2Dpl != uCpl)
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
-        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
-    }
-
-    /* Is it there? */
-    /** @todo testcase: Is this checked before the canonical / limit check below? */
-    if (!pDesc->Legacy.Gen.u1Present)
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
-        return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
-    }
-
-    return VINF_SUCCESS;
-}
-
-/** @} */
-
-
-/** @name  Raising Exceptions.
- *
- * @{
- */
-
-
-/**
- * Loads the specified stack far pointer from the TSS.
- *
- * @returns VBox strict status code.
- * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
- * @param   uCpl    The CPL to load the stack for.
- * @param   pSelSS  Where to return the new stack segment.
- * @param   puEsp   Where to return the new stack pointer.
- */
-static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
-{
-    VBOXSTRICTRC rcStrict;
-    Assert(uCpl < 4);
-
-    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
-    switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
-    {
-        /*
-         * 16-bit TSS (X86TSS16).
-         */
-        case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
-        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
-        {
-            uint32_t off = uCpl * 4 + 2;
-            if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
-            {
-                /** @todo check actual access pattern here. */
-                uint32_t u32Tmp = 0; /* gcc maybe... */
-                rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
-                if (rcStrict == VINF_SUCCESS)
-                {
-                    *puEsp  = RT_LOWORD(u32Tmp);
-                    *pSelSS = RT_HIWORD(u32Tmp);
-                    return VINF_SUCCESS;
-                }
-            }
-            else
-            {
-                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
-                rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
-            }
-            break;
-        }
-
-        /*
-         * 32-bit TSS (X86TSS32).
-         */
-        case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
-        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
-        {
-            uint32_t off = uCpl * 8 + 4;
-            if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
-            {
-                /** @todo check actual access pattern here.
*/2556 uint64_t u64Tmp;2557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2558 if (rcStrict == VINF_SUCCESS)2559 {2560 *puEsp = u64Tmp & UINT32_MAX;2561 *pSelSS = (RTSEL)(u64Tmp >> 32);2562 return VINF_SUCCESS;2563 }2564 }2565 else2566 {2567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));2568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2569 }2570 break;2571 }2572 2573 default:2574 AssertFailed();2575 rcStrict = VERR_IEM_IPE_4;2576 break;2577 }2578 2579 *puEsp = 0; /* make gcc happy */2580 *pSelSS = 0; /* make gcc happy */2581 return rcStrict;2582 }2583 2584 2585 /**2586 * Loads the specified stack pointer from the 64-bit TSS.2587 *2588 * @returns VBox strict status code.2589 * @param pVCpu The cross context virtual CPU structure of the calling thread.2590 * @param uCpl The CPL to load the stack for.2591 * @param uIst The interrupt stack table index, 0 if to use uCpl.2592 * @param puRsp Where to return the new stack pointer.2593 */2594 static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT2595 {2596 Assert(uCpl < 4);2597 Assert(uIst < 8);2598 *puRsp = 0; /* make gcc happy */2599 2600 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);2601 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);2602 2603 uint32_t off;2604 if (uIst)2605 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);2606 else2607 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);2608 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)2609 {2610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));2611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2612 }2613 2614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2615 }2616 2617 2618 /**2619 * Adjust the CPU state according to the exception being raised.2620 *2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.2622 * @param u8Vector The exception that has been raised.2623 */2624 DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)2625 {2626 switch (u8Vector)2627 {2628 case X86_XCPT_DB:2629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);2630 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;2631 break;2632 /** @todo Read the AMD and Intel exception reference... 
*/2633 }2634 }2635 2636 2637 /**2638 * Implements exceptions and interrupts for real mode.2639 *2640 * @returns VBox strict status code.2641 * @param pVCpu The cross context virtual CPU structure of the calling thread.2642 * @param cbInstr The number of bytes to offset rIP by in the return2643 * address.2644 * @param u8Vector The interrupt / exception vector number.2645 * @param fFlags The flags.2646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2648 */2649 static VBOXSTRICTRC2650 iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,2651 uint8_t cbInstr,2652 uint8_t u8Vector,2653 uint32_t fFlags,2654 uint16_t uErr,2655 uint64_t uCr2) RT_NOEXCEPT2656 {2657 NOREF(uErr); NOREF(uCr2);2658 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2659 2660 /*2661 * Read the IDT entry.2662 */2663 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)2664 {2665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));2666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));2667 }2668 RTFAR16 Idte;2669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);2670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2671 {2672 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));2673 return rcStrict;2674 }2675 2676 #ifdef LOG_ENABLED2677 /* If software interrupt, try decode it if logging is enabled and such. */2678 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2679 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))2680 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);2681 #endif2682 2683 /*2684 * Push the stack frame.2685 */2686 uint8_t bUnmapInfo;2687 uint16_t *pu16Frame;2688 uint64_t uNewRsp;2689 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);2690 if (rcStrict != VINF_SUCCESS)2691 return rcStrict;2692 2693 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);2694 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC2695 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);2696 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)2697 fEfl |= UINT16_C(0xf000);2698 #endif2699 pu16Frame[2] = (uint16_t)fEfl;2700 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;2701 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;2702 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);2703 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2704 return rcStrict;2705 2706 /*2707 * Load the vector address into cs:ip and make exception specific state2708 * adjustments.2709 */2710 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;2711 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;2712 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;2713 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;2714 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */2715 pVCpu->cpum.GstCtx.rip = Idte.off;2716 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);2717 IEMMISC_SET_EFL(pVCpu, fEfl);2718 2719 /** @todo do we actually do this in real mode? 
*/2720 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)2721 iemRaiseXcptAdjustState(pVCpu, u8Vector);2722 2723 /*2724 * Deal with debug events that follows the exception and clear inhibit flags.2725 */2726 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2727 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))2728 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2729 else2730 {2731 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",2732 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));2733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);2734 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)2735 >> CPUMCTX_DBG_HIT_DRX_SHIFT;2736 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2737 return iemRaiseDebugException(pVCpu);2738 }2739 2740 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,2741 so best leave them alone in case we're in a weird kind of real mode... */2742 2743 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;2744 }2745 2746 2747 /**2748 * Loads a NULL data selector into when coming from V8086 mode.2749 *2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.2751 * @param pSReg Pointer to the segment register.2752 */2753 DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)2754 {2755 pSReg->Sel = 0;2756 pSReg->ValidSel = 0;2757 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2758 {2759 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */2760 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;2761 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;2762 }2763 else2764 {2765 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2766 /** @todo check this on AMD-V */2767 pSReg->u64Base = 0;2768 pSReg->u32Limit = 0;2769 }2770 }2771 2772 2773 /**2774 * Loads a segment selector during a task switch in V8086 mode.2775 *2776 * @param pSReg Pointer to the segment register.2777 * @param uSel The selector value to load.2778 */2779 DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)2780 {2781 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */2782 pSReg->Sel = uSel;2783 pSReg->ValidSel = uSel;2784 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2785 pSReg->u64Base = uSel << 4;2786 pSReg->u32Limit = 0xffff;2787 pSReg->Attr.u = 0xf3;2788 }2789 2790 2791 /**2792 * Loads a segment selector during a task switch in protected mode.2793 *2794 * In this task switch scenario, we would throw \#TS exceptions rather than2795 * \#GPs.2796 *2797 * @returns VBox strict status code.2798 * @param pVCpu The cross context virtual CPU structure of the calling thread.2799 * @param pSReg Pointer to the segment register.2800 * @param uSel The new selector value.2801 *2802 * @remarks This does _not_ handle CS or SS.2803 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.2804 */2805 static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT2806 {2807 Assert(!IEM_IS_64BIT_CODE(pVCpu));2808 2809 /* Null data selector. 
*/2810 if (!(uSel & X86_SEL_MASK_OFF_RPL))2811 {2812 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);2813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2815 return VINF_SUCCESS;2816 }2817 2818 /* Fetch the descriptor. */2819 IEMSELDESC Desc;2820 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);2821 if (rcStrict != VINF_SUCCESS)2822 {2823 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,2824 VBOXSTRICTRC_VAL(rcStrict)));2825 return rcStrict;2826 }2827 2828 /* Must be a data segment or readable code segment. */2829 if ( !Desc.Legacy.Gen.u1DescType2830 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)2831 {2832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,2833 Desc.Legacy.Gen.u4Type));2834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2835 }2836 2837 /* Check privileges for data segments and non-conforming code segments. */2838 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2839 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2840 {2841 /* The RPL and the new CPL must be less than or equal to the DPL. */2842 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl2843 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))2844 {2845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",2846 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));2847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2848 }2849 }2850 2851 /* Is it there? */2852 if (!Desc.Legacy.Gen.u1Present)2853 {2854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));2855 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2856 }2857 2858 /* The base and limit. */2859 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);2860 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);2861 2862 /*2863 * Ok, everything checked out fine. Now set the accessed bit before2864 * committing the result into the registers.2865 */2866 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))2867 {2868 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);2869 if (rcStrict != VINF_SUCCESS)2870 return rcStrict;2871 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;2872 }2873 2874 /* Commit */2875 pSReg->Sel = uSel;2876 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);2877 pSReg->u32Limit = cbLimit;2878 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */2879 pSReg->ValidSel = uSel;2880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2882 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;2883 2884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2885 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2886 return VINF_SUCCESS;2887 }2888 2889 2890 /**2891 * Performs a task switch.2892 *2893 * If the task switch is the result of a JMP, CALL or IRET instruction, the2894 * caller is responsible for performing the necessary checks (like DPL, TSS2895 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction2896 * reference for JMP, CALL, IRET.2897 *2898 * If the task switch is the due to a software interrupt or hardware exception,2899 * the caller is responsible for validating the TSS selector and descriptor. 
See2900 * Intel Instruction reference for INT n.2901 *2902 * @returns VBox strict status code.2903 * @param pVCpu The cross context virtual CPU structure of the calling thread.2904 * @param enmTaskSwitch The cause of the task switch.2905 * @param uNextEip The EIP effective after the task switch.2906 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.2907 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2908 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2909 * @param SelTss The TSS selector of the new task.2910 * @param pNewDescTss Pointer to the new TSS descriptor.2911 */2912 VBOXSTRICTRC2913 iemTaskSwitch(PVMCPUCC pVCpu,2914 IEMTASKSWITCH enmTaskSwitch,2915 uint32_t uNextEip,2916 uint32_t fFlags,2917 uint16_t uErr,2918 uint64_t uCr2,2919 RTSEL SelTss,2920 PIEMSELDESC pNewDescTss) RT_NOEXCEPT2921 {2922 Assert(!IEM_IS_REAL_MODE(pVCpu));2923 Assert(!IEM_IS_64BIT_CODE(pVCpu));2924 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2925 2926 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;2927 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL2928 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY2929 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2930 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2931 2932 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2933 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2934 2935 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,2936 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));2937 2938 /* Update CR2 in case it's a page-fault. */2939 /** @todo This should probably be done much earlier in IEM/PGM. See2940 * @bugref{5653#c49}. */2941 if (fFlags & IEM_XCPT_FLAGS_CR2)2942 pVCpu->cpum.GstCtx.cr2 = uCr2;2943 2944 /*2945 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"2946 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".2947 */2948 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);2949 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;2950 if (uNewTssLimit < uNewTssLimitMin)2951 {2952 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",2953 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);2955 }2956 2957 /*2958 * Task switches in VMX non-root mode always cause task switches.2959 * The new TSS must have been read and validated (DPL, limits etc.) before a2960 * task-switch VM-exit commences.2961 *2962 * See Intel spec. 25.4.2 "Treatment of Task Switches".2963 */2964 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))2965 {2966 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));2967 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);2968 }2969 2970 /*2971 * The SVM nested-guest intercept for task-switch takes priority over all exceptions2972 * after validating the incoming (new) TSS, see AMD spec. 
15.14.1 "Task Switch Intercept".2973 */2974 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))2975 {2976 uint64_t const uExitInfo1 = SelTss;2977 uint64_t uExitInfo2 = uErr;2978 switch (enmTaskSwitch)2979 {2980 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;2981 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;2982 default: break;2983 }2984 if (fFlags & IEM_XCPT_FLAGS_ERR)2985 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;2986 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)2987 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;2988 2989 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));2990 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);2991 RT_NOREF2(uExitInfo1, uExitInfo2);2992 }2993 2994 /*2995 * Check the current TSS limit. The last written byte to the current TSS during the2996 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).2997 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.2998 *2999 * The AMD docs doesn't mention anything about limit checks with LTR which suggests you can3000 * end up with smaller than "legal" TSS limits.3001 */3002 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;3003 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;3004 if (uCurTssLimit < uCurTssLimitMin)3005 {3006 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",3007 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));3008 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);3009 }3010 3011 /*3012 * Verify that the new TSS can be accessed and map it. Map only the required contents3013 * and not the entire TSS.3014 */3015 uint8_t bUnmapInfoNewTss;3016 void *pvNewTss;3017 uint32_t const cbNewTss = uNewTssLimitMin + 1;3018 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);3019 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);3020 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may3021 * not perform correct translation if this happens. See Intel spec. 7.2.13022 * "Task-State Segment". */3023 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);3024 /** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.3025 * Consider wrapping the remainder into a function for simpler cleanup. */3026 if (rcStrict != VINF_SUCCESS)3027 {3028 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,3029 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));3030 return rcStrict;3031 }3032 3033 /*3034 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.3035 */3036 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;3037 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP3038 || enmTaskSwitch == IEMTASKSWITCH_IRET)3039 {3040 uint8_t bUnmapInfoDescCurTss;3041 PX86DESC pDescCurTss;3042 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,3043 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3044 if (rcStrict != VINF_SUCCESS)3045 {3046 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. 
enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3047 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3048 return rcStrict;3049 }3050 3051 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3052 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);3053 if (rcStrict != VINF_SUCCESS)3054 {3055 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3056 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3057 return rcStrict;3058 }3059 3060 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */3061 if (enmTaskSwitch == IEMTASKSWITCH_IRET)3062 {3063 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY3064 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);3065 fEFlags &= ~X86_EFL_NT;3066 }3067 }3068 3069 /*3070 * Save the CPU state into the current TSS.3071 */3072 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;3073 if (GCPtrNewTss == GCPtrCurTss)3074 {3075 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));3076 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",3077 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,3078 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,3079 pVCpu->cpum.GstCtx.ldtr.Sel));3080 }3081 if (fIsNewTss386)3082 {3083 /*3084 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.3085 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.3086 */3087 uint8_t bUnmapInfoCurTss32;3088 void *pvCurTss32;3089 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);3090 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);3091 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);3092 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,3093 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3094 if (rcStrict != VINF_SUCCESS)3095 {3096 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3097 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3098 return rcStrict;3099 }3100 3101 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3102 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);3103 pCurTss32->eip = uNextEip;3104 pCurTss32->eflags = fEFlags;3105 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;3106 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;3107 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;3108 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;3109 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;3110 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;3111 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;3112 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;3113 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;3114 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;3115 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;3116 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;3117 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;3118 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;3119 3120 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);3121 if (rcStrict != VINF_SUCCESS)3122 {3123 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. 
enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3124 VBOXSTRICTRC_VAL(rcStrict)));3125 return rcStrict;3126 }3127 }3128 else3129 {3130 /*3131 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.3132 */3133 uint8_t bUnmapInfoCurTss16;3134 void *pvCurTss16;3135 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);3136 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);3137 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);3138 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,3139 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3140 if (rcStrict != VINF_SUCCESS)3141 {3142 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3143 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3144 return rcStrict;3145 }3146 3147 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3148 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);3149 pCurTss16->ip = uNextEip;3150 pCurTss16->flags = (uint16_t)fEFlags;3151 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;3152 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;3153 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;3154 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;3155 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;3156 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;3157 pCurTss16->si = pVCpu->cpum.GstCtx.si;3158 pCurTss16->di = pVCpu->cpum.GstCtx.di;3159 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;3160 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;3161 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;3162 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;3163 3164 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);3165 if (rcStrict != VINF_SUCCESS)3166 {3167 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3168 VBOXSTRICTRC_VAL(rcStrict)));3169 return rcStrict;3170 }3171 }3172 3173 /*3174 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.3175 */3176 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3177 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3178 {3179 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */3180 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;3181 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;3182 }3183 3184 /*3185 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,3186 * it's done further below with error handling (e.g. CR3 changes will go through PGM).3187 */3188 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;3189 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;3190 bool fNewDebugTrap;3191 if (fIsNewTss386)3192 {3193 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;3194 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? 
pNewTss32->cr3 : 0;3195 uNewEip = pNewTss32->eip;3196 uNewEflags = pNewTss32->eflags;3197 uNewEax = pNewTss32->eax;3198 uNewEcx = pNewTss32->ecx;3199 uNewEdx = pNewTss32->edx;3200 uNewEbx = pNewTss32->ebx;3201 uNewEsp = pNewTss32->esp;3202 uNewEbp = pNewTss32->ebp;3203 uNewEsi = pNewTss32->esi;3204 uNewEdi = pNewTss32->edi;3205 uNewES = pNewTss32->es;3206 uNewCS = pNewTss32->cs;3207 uNewSS = pNewTss32->ss;3208 uNewDS = pNewTss32->ds;3209 uNewFS = pNewTss32->fs;3210 uNewGS = pNewTss32->gs;3211 uNewLdt = pNewTss32->selLdt;3212 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);3213 }3214 else3215 {3216 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;3217 uNewCr3 = 0;3218 uNewEip = pNewTss16->ip;3219 uNewEflags = pNewTss16->flags;3220 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;3221 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;3222 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;3223 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;3224 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;3225 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;3226 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;3227 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;3228 uNewES = pNewTss16->es;3229 uNewCS = pNewTss16->cs;3230 uNewSS = pNewTss16->ss;3231 uNewDS = pNewTss16->ds;3232 uNewFS = 0;3233 uNewGS = 0;3234 uNewLdt = pNewTss16->selLdt;3235 fNewDebugTrap = false;3236 }3237 3238 if (GCPtrNewTss == GCPtrCurTss)3239 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",3240 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));3241 3242 /*3243 * We're done accessing the new TSS.3244 */3245 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3246 if (rcStrict != VINF_SUCCESS)3247 {3248 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));3249 return rcStrict;3250 }3251 3252 /*3253 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.3254 */3255 if (enmTaskSwitch != IEMTASKSWITCH_IRET)3256 {3257 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,3258 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3259 if (rcStrict != VINF_SUCCESS)3260 {3261 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3262 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3263 return rcStrict;3264 }3265 3266 /* Check that the descriptor indicates the new TSS is available (not busy). */3267 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL3268 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,3269 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));3270 3271 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3272 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3273 if (rcStrict != VINF_SUCCESS)3274 {3275 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3276 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3277 return rcStrict;3278 }3279 }3280 3281 /*3282 * From this point on, we're technically in the new task. 
We will defer exceptions3283 * until the completion of the task switch but before executing any instructions in the new task.3284 */3285 pVCpu->cpum.GstCtx.tr.Sel = SelTss;3286 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;3287 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;3288 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);3289 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);3290 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);3291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);3292 3293 /* Set the busy bit in TR. */3294 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3295 3296 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */3297 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3298 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3299 {3300 uNewEflags |= X86_EFL_NT;3301 }3302 3303 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */3304 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;3305 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);3306 3307 pVCpu->cpum.GstCtx.eip = uNewEip;3308 pVCpu->cpum.GstCtx.eax = uNewEax;3309 pVCpu->cpum.GstCtx.ecx = uNewEcx;3310 pVCpu->cpum.GstCtx.edx = uNewEdx;3311 pVCpu->cpum.GstCtx.ebx = uNewEbx;3312 pVCpu->cpum.GstCtx.esp = uNewEsp;3313 pVCpu->cpum.GstCtx.ebp = uNewEbp;3314 pVCpu->cpum.GstCtx.esi = uNewEsi;3315 pVCpu->cpum.GstCtx.edi = uNewEdi;3316 3317 uNewEflags &= X86_EFL_LIVE_MASK;3318 uNewEflags |= X86_EFL_RA1_MASK;3319 IEMMISC_SET_EFL(pVCpu, uNewEflags);3320 3321 /*3322 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors3323 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR33324 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.3325 */3326 pVCpu->cpum.GstCtx.es.Sel = uNewES;3327 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;3328 3329 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3330 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;3331 3332 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3333 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;3334 3335 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;3336 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;3337 3338 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;3339 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;3340 3341 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;3342 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;3343 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);3344 3345 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;3346 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;3347 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;3348 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);3349 3350 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3351 {3352 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;3353 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;3354 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;3355 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;3356 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;3357 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;3358 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;3359 }3360 3361 /*3362 * Switch CR3 for the new task.3363 */3364 if ( fIsNewTss3863365 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))3366 {3367 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */3368 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);3369 AssertRCSuccessReturn(rc, rc);3370 3371 /* Inform PGM. 
*/3372 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */3373 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));3374 AssertRCReturn(rc, rc);3375 /* ignore informational status codes */3376 3377 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);3378 }3379 3380 /*3381 * Switch LDTR for the new task.3382 */3383 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))3384 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);3385 else3386 {3387 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */3388 3389 IEMSELDESC DescNewLdt;3390 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);3391 if (rcStrict != VINF_SUCCESS)3392 {3393 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,3394 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));3395 return rcStrict;3396 }3397 if ( !DescNewLdt.Legacy.Gen.u1Present3398 || DescNewLdt.Legacy.Gen.u1DescType3399 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)3400 {3401 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,3402 uNewLdt, DescNewLdt.Legacy.u));3403 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);3404 }3405 3406 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;3407 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;3408 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);3409 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);3410 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);3411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3412 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;3413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));3414 }3415 3416 IEMSELDESC DescSS;3417 if (IEM_IS_V86_MODE(pVCpu))3418 {3419 IEM_SET_CPL(pVCpu, 3);3420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);3421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);3422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);3423 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);3424 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);3425 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);3426 3427 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */3428 DescSS.Legacy.u = 0;3429 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;3430 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;3431 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;3432 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);3433 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);3434 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;3435 DescSS.Legacy.Gen.u2Dpl = 3;3436 }3437 else3438 {3439 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);3440 3441 /*3442 * Load the stack segment for the new task.3443 */3444 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))3445 {3446 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));3447 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3448 }3449 3450 /* Fetch the descriptor. */3451 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);3452 if (rcStrict != VINF_SUCCESS)3453 {3454 Log(("iemTaskSwitch: failed to fetch SS. 
uNewSS=%#x rc=%Rrc\n", uNewSS,3455 VBOXSTRICTRC_VAL(rcStrict)));3456 return rcStrict;3457 }3458 3459 /* SS must be a data segment and writable. */3460 if ( !DescSS.Legacy.Gen.u1DescType3461 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)3462 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))3463 {3464 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",3465 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));3466 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3467 }3468 3469 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */3470 if ( (uNewSS & X86_SEL_RPL) != uNewCpl3471 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)3472 {3473 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,3474 uNewCpl));3475 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3476 }3477 3478 /* Is it there? */3479 if (!DescSS.Legacy.Gen.u1Present)3480 {3481 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));3482 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3483 }3484 3485 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);3486 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);3487 3488 /* Set the accessed bit before committing the result into SS. */3489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3490 {3491 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);3492 if (rcStrict != VINF_SUCCESS)3493 return rcStrict;3494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3495 }3496 3497 /* Commit SS. */3498 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3499 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;3500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);3501 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;3502 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;3503 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;3504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));3505 3506 /* CPL has changed, update IEM before loading rest of segments. */3507 IEM_SET_CPL(pVCpu, uNewCpl);3508 3509 /*3510 * Load the data segments for the new task.3511 */3512 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);3513 if (rcStrict != VINF_SUCCESS)3514 return rcStrict;3515 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);3516 if (rcStrict != VINF_SUCCESS)3517 return rcStrict;3518 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);3519 if (rcStrict != VINF_SUCCESS)3520 return rcStrict;3521 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);3522 if (rcStrict != VINF_SUCCESS)3523 return rcStrict;3524 3525 /*3526 * Load the code segment for the new task.3527 */3528 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))3529 {3530 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));3531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3532 }3533 3534 /* Fetch the descriptor. */3535 IEMSELDESC DescCS;3536 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);3537 if (rcStrict != VINF_SUCCESS)3538 {3539 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));3540 return rcStrict;3541 }3542 3543 /* CS must be a code segment. 
*/3544 if ( !DescCS.Legacy.Gen.u1DescType3545 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3546 {3547 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,3548 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));3549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3550 }3551 3552 /* For conforming CS, DPL must be less than or equal to the RPL. */3553 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3554 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))3555 {3556 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,3557 DescCS.Legacy.Gen.u2Dpl));3558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3559 }3560 3561 /* For non-conforming CS, DPL must match RPL. */3562 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3563 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))3564 {3565 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,3566 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));3567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3568 }3569 3570 /* Is it there? */3571 if (!DescCS.Legacy.Gen.u1Present)3572 {3573 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));3574 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3575 }3576 3577 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);3578 u64Base = X86DESC_BASE(&DescCS.Legacy);3579 3580 /* Set the accessed bit before committing the result into CS. */3581 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3582 {3583 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);3584 if (rcStrict != VINF_SUCCESS)3585 return rcStrict;3586 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3587 }3588 3589 /* Commit CS. */3590 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3591 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;3592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);3593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;3594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;3595 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;3596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));3597 }3598 3599 /* Make sure the CPU mode is correct. */3600 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);3601 if (fExecNew != pVCpu->iem.s.fExec)3602 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));3603 pVCpu->iem.s.fExec = fExecNew;3604 3605 /** @todo Debug trap. */3606 if (fIsNewTss386 && fNewDebugTrap)3607 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));3608 3609 /*3610 * Construct the error code masks based on what caused this task switch.3611 * See Intel Instruction reference for INT.3612 */3613 uint16_t uExt;3614 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT3615 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3616 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))3617 uExt = 1;3618 else3619 uExt = 0;3620 3621 /*3622 * Push any error code on to the new stack.3623 */3624 if (fFlags & IEM_XCPT_FLAGS_ERR)3625 {3626 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);3627 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3628 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;3629 3630 /* Check that there is sufficient space on the stack. 
*/3631 /** @todo Factor out segment limit checking for normal/expand down segments3632 * into a separate function. */3633 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3634 {3635 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS3636 || pVCpu->cpum.GstCtx.esp < cbStackFrame)3637 {3638 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3639 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",3640 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3641 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3642 }3643 }3644 else3645 {3646 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))3647 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))3648 {3649 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",3650 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3651 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3652 }3653 }3654 3655 3656 if (fIsNewTss386)3657 rcStrict = iemMemStackPushU32(pVCpu, uErr);3658 else3659 rcStrict = iemMemStackPushU16(pVCpu, uErr);3660 if (rcStrict != VINF_SUCCESS)3661 {3662 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",3663 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));3664 return rcStrict;3665 }3666 }3667 3668 /* Check the new EIP against the new CS limit. */3669 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)3670 {3671 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",3672 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));3673 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3674 return iemRaiseGeneralProtectionFault(pVCpu, uExt);3675 }3676 3677 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,3678 pVCpu->cpum.GstCtx.ss.Sel));3679 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;3680 }3681 3682 3683 /**3684 * Implements exceptions and interrupts for protected mode.3685 *3686 * @returns VBox strict status code.3687 * @param pVCpu The cross context virtual CPU structure of the calling thread.3688 * @param cbInstr The number of bytes to offset rIP by in the return3689 * address.3690 * @param u8Vector The interrupt / exception vector number.3691 * @param fFlags The flags.3692 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.3693 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.3694 */3695 static VBOXSTRICTRC3696 iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,3697 uint8_t cbInstr,3698 uint8_t u8Vector,3699 uint32_t fFlags,3700 uint16_t uErr,3701 uint64_t uCr2) RT_NOEXCEPT3702 {3703 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);3704 3705 /*3706 * Read the IDT entry.3707 */3708 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)3709 {3710 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));3711 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3712 }3713 X86DESC Idte;3714 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,3715 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);3716 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))3717 {3718 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! 
vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));3719 return rcStrict;3720 }3721 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",3722 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,3723 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,3724 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));3725 3726 /*3727 * Check the descriptor type, DPL and such.3728 * ASSUMES this is done in the same order as described for call-gate calls.3729 */3730 if (Idte.Gate.u1DescType)3731 {3732 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3733 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3734 }3735 bool fTaskGate = false;3736 uint8_t f32BitGate = true;3737 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;3738 switch (Idte.Gate.u4Type)3739 {3740 case X86_SEL_TYPE_SYS_UNDEFINED:3741 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:3742 case X86_SEL_TYPE_SYS_LDT:3743 case X86_SEL_TYPE_SYS_286_TSS_BUSY:3744 case X86_SEL_TYPE_SYS_286_CALL_GATE:3745 case X86_SEL_TYPE_SYS_UNDEFINED2:3746 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:3747 case X86_SEL_TYPE_SYS_UNDEFINED3:3748 case X86_SEL_TYPE_SYS_386_TSS_BUSY:3749 case X86_SEL_TYPE_SYS_386_CALL_GATE:3750 case X86_SEL_TYPE_SYS_UNDEFINED4:3751 {3752 /** @todo check what actually happens when the type is wrong...3753 * esp. call gates. */3754 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3755 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3756 }3757 3758 case X86_SEL_TYPE_SYS_286_INT_GATE:3759 f32BitGate = false;3760 RT_FALL_THRU();3761 case X86_SEL_TYPE_SYS_386_INT_GATE:3762 fEflToClear |= X86_EFL_IF;3763 break;3764 3765 case X86_SEL_TYPE_SYS_TASK_GATE:3766 fTaskGate = true;3767 #ifndef IEM_IMPLEMENTS_TASKSWITCH3768 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));3769 #endif3770 break;3771 3772 case X86_SEL_TYPE_SYS_286_TRAP_GATE:3773 f32BitGate = false;3774 break;3775 case X86_SEL_TYPE_SYS_386_TRAP_GATE:3776 break;3777 3778 IEM_NOT_REACHED_DEFAULT_CASE_RET();3779 }3780 3781 /* Check DPL against CPL if applicable. */3782 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)3783 {3784 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)3785 {3786 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));3787 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3788 }3789 }3790 3791 /* Is it there? */3792 if (!Idte.Gate.u1Present)3793 {3794 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));3795 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3796 }3797 3798 /* Is it a task-gate? */3799 if (fTaskGate)3800 {3801 /*3802 * Construct the error code masks based on what caused this task switch.3803 * See Intel Instruction reference for INT.3804 */3805 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3806 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 
0 : 1;3807 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;3808 RTSEL SelTss = Idte.Gate.u16Sel;3809 3810 /*3811 * Fetch the TSS descriptor in the GDT.3812 */3813 IEMSELDESC DescTSS;3814 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);3815 if (rcStrict != VINF_SUCCESS)3816 {3817 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,3818 VBOXSTRICTRC_VAL(rcStrict)));3819 return rcStrict;3820 }3821 3822 /* The TSS descriptor must be a system segment and be available (not busy). */3823 if ( DescTSS.Legacy.Gen.u1DescType3824 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL3825 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))3826 {3827 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",3828 u8Vector, SelTss, DescTSS.Legacy.au64));3829 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);3830 }3831 3832 /* The TSS must be present. */3833 if (!DescTSS.Legacy.Gen.u1Present)3834 {3835 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));3836 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);3837 }3838 3839 /* Do the actual task switch. */3840 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,3841 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,3842 fFlags, uErr, uCr2, SelTss, &DescTSS);3843 }3844 3845 /* A null CS is bad. */3846 RTSEL NewCS = Idte.Gate.u16Sel;3847 if (!(NewCS & X86_SEL_MASK_OFF_RPL))3848 {3849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));3850 return iemRaiseGeneralProtectionFault0(pVCpu);3851 }3852 3853 /* Fetch the descriptor for the new CS. */3854 IEMSELDESC DescCS;3855 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */3856 if (rcStrict != VINF_SUCCESS)3857 {3858 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));3859 return rcStrict;3860 }3861 3862 /* Must be a code segment. */3863 if (!DescCS.Legacy.Gen.u1DescType)3864 {3865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3866 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3867 }3868 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3869 {3870 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3871 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3872 }3873 3874 /* Don't allow lowering the privilege level. */3875 /** @todo Does the lowering of privileges apply to software interrupts3876 * only? This has bearings on the more-privileged or3877 * same-privilege stack behavior further down. A testcase would3878 * be nice. */3879 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))3880 {3881 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",3882 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));3883 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3884 }3885 3886 /* Make sure the selector is present. 
*/3887 if (!DescCS.Legacy.Gen.u1Present)3888 {3889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));3890 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);3891 }3892 3893 #ifdef LOG_ENABLED3894 /* If software interrupt, try decode it if logging is enabled and such. */3895 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3896 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))3897 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);3898 #endif3899 3900 /* Check the new EIP against the new CS limit. */3901 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE3902 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE3903 ? Idte.Gate.u16OffsetLow3904 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);3905 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);3906 if (uNewEip > cbLimitCS)3907 {3908 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",3909 u8Vector, uNewEip, cbLimitCS, NewCS));3910 return iemRaiseGeneralProtectionFault(pVCpu, 0);3911 }3912 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));3913 3914 /* Calc the flag image to push. */3915 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);3916 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))3917 fEfl &= ~X86_EFL_RF;3918 else3919 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */3920 3921 /* From V8086 mode only go to CPL 0. */3922 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF3923 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;3924 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */3925 {3926 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));3927 return iemRaiseGeneralProtectionFault(pVCpu, 0);3928 }3929 3930 /*3931 * If the privilege level changes, we need to get a new stack from the TSS.3932 * This in turns means validating the new SS and ESP...3933 */3934 if (uNewCpl != IEM_GET_CPL(pVCpu))3935 {3936 RTSEL NewSS;3937 uint32_t uNewEsp;3938 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);3939 if (rcStrict != VINF_SUCCESS)3940 return rcStrict;3941 3942 IEMSELDESC DescSS;3943 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);3944 if (rcStrict != VINF_SUCCESS)3945 return rcStrict;3946 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */3947 if (!DescSS.Legacy.Gen.u1DefBig)3948 {3949 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));3950 uNewEsp = (uint16_t)uNewEsp;3951 }3952 3953 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));3954 3955 /* Check that there is sufficient space for the stack frame. */3956 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3957 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)3958 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate3959 : (fFlags & IEM_XCPT_FLAGS_ERR ? 
20 : 18) << f32BitGate;3960 3961 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3962 {3963 if ( uNewEsp - 1 > cbLimitSS3964 || uNewEsp < cbStackFrame)3965 {3966 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",3967 u8Vector, NewSS, uNewEsp, cbStackFrame));3968 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3969 }3970 }3971 else3972 {3973 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)3974 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))3975 {3976 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",3977 u8Vector, NewSS, uNewEsp, cbStackFrame));3978 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3979 }3980 }3981 3982 /*3983 * Start making changes.3984 */3985 3986 /* Set the new CPL so that stack accesses use it. */3987 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);3988 IEM_SET_CPL(pVCpu, uNewCpl);3989 3990 /* Create the stack frame. */3991 uint8_t bUnmapInfoStackFrame;3992 RTPTRUNION uStackFrame;3993 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,3994 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),3995 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */3996 if (rcStrict != VINF_SUCCESS)3997 return rcStrict;3998 if (f32BitGate)3999 {4000 if (fFlags & IEM_XCPT_FLAGS_ERR)4001 *uStackFrame.pu32++ = uErr;4002 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4003 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4004 uStackFrame.pu32[2] = fEfl;4005 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;4006 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;4007 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));4008 if (fEfl & X86_EFL_VM)4009 {4010 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;4011 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;4012 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;4013 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;4014 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;4015 }4016 }4017 else4018 {4019 if (fFlags & IEM_XCPT_FLAGS_ERR)4020 *uStackFrame.pu16++ = uErr;4021 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;4022 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4023 uStackFrame.pu16[2] = fEfl;4024 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;4025 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;4026 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));4027 if (fEfl & X86_EFL_VM)4028 {4029 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;4030 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;4031 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;4032 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;4033 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;4034 }4035 }4036 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);4037 if (rcStrict != VINF_SUCCESS)4038 return rcStrict;4039 4040 /* Mark the selectors 'accessed' (hope this is the correct time). */4041 /** @todo testcase: excatly _when_ are the accessed bits set - before or4042 * after pushing the stack frame? (Write protect the gdt + stack to4043 * find out.) 
*/4044 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4045 {4046 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4047 if (rcStrict != VINF_SUCCESS)4048 return rcStrict;4049 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4050 }4051 4052 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4053 {4054 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);4055 if (rcStrict != VINF_SUCCESS)4056 return rcStrict;4057 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4058 }4059 4060 /*4061 * Start comitting the register changes (joins with the DPL=CPL branch).4062 */4063 pVCpu->cpum.GstCtx.ss.Sel = NewSS;4064 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;4065 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;4066 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;4067 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);4068 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);4069 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and4070 * 16-bit handler, the high word of ESP remains unchanged (i.e. only4071 * SP is loaded).4072 * Need to check the other combinations too:4073 * - 16-bit TSS, 32-bit handler4074 * - 32-bit TSS, 16-bit handler */4075 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)4076 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);4077 else4078 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;4079 4080 if (fEfl & X86_EFL_VM)4081 {4082 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);4083 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);4084 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);4085 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);4086 }4087 }4088 /*4089 * Same privilege, no stack change and smaller stack frame.4090 */4091 else4092 {4093 uint64_t uNewRsp;4094 uint8_t bUnmapInfoStackFrame;4095 RTPTRUNION uStackFrame;4096 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;4097 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,4098 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);4099 if (rcStrict != VINF_SUCCESS)4100 return rcStrict;4101 4102 if (f32BitGate)4103 {4104 if (fFlags & IEM_XCPT_FLAGS_ERR)4105 *uStackFrame.pu32++ = uErr;4106 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4107 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);4108 uStackFrame.pu32[2] = fEfl;4109 }4110 else4111 {4112 if (fFlags & IEM_XCPT_FLAGS_ERR)4113 *uStackFrame.pu16++ = uErr;4114 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4115 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);4116 uStackFrame.pu16[2] = fEfl;4117 }4118 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */4119 if (rcStrict != VINF_SUCCESS)4120 return rcStrict;4121 4122 /* Mark the CS selector as 'accessed'. */4123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4124 {4125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4126 if (rcStrict != VINF_SUCCESS)4127 return rcStrict;4128 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4129 }4130 4131 /*4132 * Start committing the register changes (joins with the other branch).4133 */4134 pVCpu->cpum.GstCtx.rsp = uNewRsp;4135 }4136 4137 /* ... register committing continues. 
*/4138 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4139 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4140 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;4141 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;4142 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);4143 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);4144 4145 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */4146 fEfl &= ~fEflToClear;4147 IEMMISC_SET_EFL(pVCpu, fEfl);4148 4149 if (fFlags & IEM_XCPT_FLAGS_CR2)4150 pVCpu->cpum.GstCtx.cr2 = uCr2;4151 4152 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)4153 iemRaiseXcptAdjustState(pVCpu, u8Vector);4154 4155 /* Make sure the execution flags are correct. */4156 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);4157 if (fExecNew != pVCpu->iem.s.fExec)4158 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",4159 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));4160 pVCpu->iem.s.fExec = fExecNew;4161 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);4162 4163 /*4164 * Deal with debug events that follows the exception and clear inhibit flags.4165 */4166 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4167 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))4168 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4169 else4170 {4171 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",4172 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));4173 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);4174 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4175 >> CPUMCTX_DBG_HIT_DRX_SHIFT;4176 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4177 return iemRaiseDebugException(pVCpu);4178 }4179 4180 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4181 }4182 4183 4184 /**4185 * Implements exceptions and interrupts for long mode.4186 *4187 * @returns VBox strict status code.4188 * @param pVCpu The cross context virtual CPU structure of the calling thread.4189 * @param cbInstr The number of bytes to offset rIP by in the return4190 * address.4191 * @param u8Vector The interrupt / exception vector number.4192 * @param fFlags The flags.4193 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4194 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4195 */4196 static VBOXSTRICTRC4197 iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,4198 uint8_t cbInstr,4199 uint8_t u8Vector,4200 uint32_t fFlags,4201 uint16_t uErr,4202 uint64_t uCr2) RT_NOEXCEPT4203 {4204 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4205 4206 /*4207 * Read the IDT entry.4208 */4209 uint16_t offIdt = (uint16_t)u8Vector << 4;4210 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)4211 {4212 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));4213 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4214 }4215 X86DESC64 Idte;4216 #ifdef _MSC_VER /* Shut up silly compiler warning. 
*/4217 Idte.au64[0] = 0;4218 Idte.au64[1] = 0;4219 #endif4220 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);4221 if (RT_LIKELY(rcStrict == VINF_SUCCESS))4222 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);4223 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))4224 {4225 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));4226 return rcStrict;4227 }4228 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",4229 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,4230 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));4231 4232 /*4233 * Check the descriptor type, DPL and such.4234 * ASSUMES this is done in the same order as described for call-gate calls.4235 */4236 if (Idte.Gate.u1DescType)4237 {4238 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4239 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4240 }4241 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;4242 switch (Idte.Gate.u4Type)4243 {4244 case AMD64_SEL_TYPE_SYS_INT_GATE:4245 fEflToClear |= X86_EFL_IF;4246 break;4247 case AMD64_SEL_TYPE_SYS_TRAP_GATE:4248 break;4249 4250 default:4251 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4252 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4253 }4254 4255 /* Check DPL against CPL if applicable. */4256 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)4257 {4258 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)4259 {4260 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));4261 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4262 }4263 }4264 4265 /* Is it there? */4266 if (!Idte.Gate.u1Present)4267 {4268 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));4269 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4270 }4271 4272 /* A null CS is bad. */4273 RTSEL NewCS = Idte.Gate.u16Sel;4274 if (!(NewCS & X86_SEL_MASK_OFF_RPL))4275 {4276 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));4277 return iemRaiseGeneralProtectionFault0(pVCpu);4278 }4279 4280 /* Fetch the descriptor for the new CS. */4281 IEMSELDESC DescCS;4282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);4283 if (rcStrict != VINF_SUCCESS)4284 {4285 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));4286 return rcStrict;4287 }4288 4289 /* Must be a 64-bit code segment. 
*/4290 if (!DescCS.Long.Gen.u1DescType)4291 {4292 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));4293 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4294 }4295 if ( !DescCS.Long.Gen.u1Long4296 || DescCS.Long.Gen.u1DefBig4297 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )4298 {4299 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",4300 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));4301 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4302 }4303 4304 /* Don't allow lowering the privilege level. For non-conforming CS4305 selectors, the CS.DPL sets the privilege level the trap/interrupt4306 handler runs at. For conforming CS selectors, the CPL remains4307 unchanged, but the CS.DPL must be <= CPL. */4308 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched4309 * when CPU in Ring-0. Result \#GP? */4310 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))4311 {4312 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",4313 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));4314 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4315 }4316 4317 4318 /* Make sure the selector is present. */4319 if (!DescCS.Legacy.Gen.u1Present)4320 {4321 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));4322 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);4323 }4324 4325 /* Check that the new RIP is canonical. */4326 uint64_t const uNewRip = Idte.Gate.u16OffsetLow4327 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)4328 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);4329 if (!IEM_IS_CANONICAL(uNewRip))4330 {4331 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));4332 return iemRaiseGeneralProtectionFault0(pVCpu);4333 }4334 4335 /*4336 * If the privilege level changes or if the IST isn't zero, we need to get4337 * a new stack from the TSS.4338 */4339 uint64_t uNewRsp;4340 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF4341 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;4342 if ( uNewCpl != IEM_GET_CPL(pVCpu)4343 || Idte.Gate.u3IST != 0)4344 {4345 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);4346 if (rcStrict != VINF_SUCCESS)4347 return rcStrict;4348 }4349 else4350 uNewRsp = pVCpu->cpum.GstCtx.rsp;4351 uNewRsp &= ~(uint64_t)0xf;4352 4353 /*4354 * Calc the flag image to push.4355 */4356 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);4357 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))4358 fEfl &= ~X86_EFL_RF;4359 else4360 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */4361 4362 /*4363 * Start making changes.4364 */4365 /* Set the new CPL so that stack accesses use it. */4366 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);4367 IEM_SET_CPL(pVCpu, uNewCpl);4368 /** @todo Setting CPL this early seems wrong as it would affect and errors we4369 * raise accessing the stack and (?) GDT/LDT... */4370 4371 /* Create the stack frame. 
*/
4372 uint8_t bUnmapInfoStackFrame;
4373 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4374 RTPTRUNION uStackFrame;
4375 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4376 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4377 if (rcStrict != VINF_SUCCESS)
4378 return rcStrict;
4379
4380 if (fFlags & IEM_XCPT_FLAGS_ERR)
4381 *uStackFrame.pu64++ = uErr;
4382 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4383 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4384 uStackFrame.pu64[2] = fEfl;
4385 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4386 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4387 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4388 if (rcStrict != VINF_SUCCESS)
4389 return rcStrict;
4390
4391 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4392 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4393 * after pushing the stack frame? (Write protect the gdt + stack to
4394 * find out.) */
4395 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4396 {
4397 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4398 if (rcStrict != VINF_SUCCESS)
4399 return rcStrict;
4400 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4401 }
4402
4403 /*
4404 * Start committing the register changes.
4405 */
4406 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4407 * hidden registers when interrupting 32-bit or 16-bit code! */
4408 if (uNewCpl != uOldCpl)
4409 {
4410 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4411 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4412 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4413 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4414 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4415 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4416 }
4417 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4418 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4419 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4420 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4421 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4422 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4423 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4424 pVCpu->cpum.GstCtx.rip = uNewRip;
4425
4426 fEfl &= ~fEflToClear;
4427 IEMMISC_SET_EFL(pVCpu, fEfl);
4428
4429 if (fFlags & IEM_XCPT_FLAGS_CR2)
4430 pVCpu->cpum.GstCtx.cr2 = uCr2;
4431
4432 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4433 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4434
4435 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4436
4437 /*
4438 * Deal with debug events that follow the exception and clear inhibit flags.
4439 */
4440 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4441 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4442 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4443 else
4444 {
4445 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4446 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4447 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4448 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4449 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4450 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK |
CPUMCTX_INHIBIT_SHADOW);4451 return iemRaiseDebugException(pVCpu);4452 }4453 4454 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4455 }4456 4457 4458 /**4459 * Implements exceptions and interrupts.4460 *4461 * All exceptions and interrupts goes thru this function!4462 *4463 * @returns VBox strict status code.4464 * @param pVCpu The cross context virtual CPU structure of the calling thread.4465 * @param cbInstr The number of bytes to offset rIP by in the return4466 * address.4467 * @param u8Vector The interrupt / exception vector number.4468 * @param fFlags The flags.4469 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4470 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4471 */4472 VBOXSTRICTRC4473 iemRaiseXcptOrInt(PVMCPUCC pVCpu,4474 uint8_t cbInstr,4475 uint8_t u8Vector,4476 uint32_t fFlags,4477 uint16_t uErr,4478 uint64_t uCr2) RT_NOEXCEPT4479 {4480 /*4481 * Get all the state that we might need here.4482 */4483 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4484 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4485 4486 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */4487 /*4488 * Flush prefetch buffer4489 */4490 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;4491 #endif4492 4493 /*4494 * Perform the V8086 IOPL check and upgrade the fault without nesting.4495 */4496 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM4497 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 34498 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT4499 | IEM_XCPT_FLAGS_BP_INSTR4500 | IEM_XCPT_FLAGS_ICEBP_INSTR4501 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT4502 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )4503 {4504 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));4505 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4506 u8Vector = X86_XCPT_GP;4507 uErr = 0;4508 }4509 4510 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);4511 #ifdef DBGFTRACE_ENABLED4512 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",4513 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,4514 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);4515 #endif4516 4517 /*4518 * Check if DBGF wants to intercept the exception.4519 */4520 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))4521 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )4522 { /* likely */ }4523 else4524 {4525 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),4526 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);4527 if (rcStrict != VINF_SUCCESS)4528 return rcStrict;4529 }4530 4531 /*4532 * Evaluate whether NMI blocking should be in effect.4533 * Normally, NMI blocking is in effect whenever we inject an NMI.4534 */4535 bool fBlockNmi = u8Vector == X86_XCPT_NMI4536 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);4537 4538 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4539 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4540 {4541 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);4542 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4543 return rcStrict0;4544 4545 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. 
*/4546 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)4547 {4548 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));4549 fBlockNmi = false;4550 }4551 }4552 #endif4553 4554 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM4555 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))4556 {4557 /*4558 * If the event is being injected as part of VMRUN, it isn't subject to event4559 * intercepts in the nested-guest. However, secondary exceptions that occur4560 * during injection of any event -are- subject to exception intercepts.4561 *4562 * See AMD spec. 15.20 "Event Injection".4563 */4564 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)4565 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;4566 else4567 {4568 /*4569 * Check and handle if the event being raised is intercepted.4570 */4571 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4572 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)4573 return rcStrict0;4574 }4575 }4576 #endif4577 4578 /*4579 * Set NMI blocking if necessary.4580 */4581 if (fBlockNmi)4582 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);4583 4584 /*4585 * Do recursion accounting.4586 */4587 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;4588 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;4589 if (pVCpu->iem.s.cXcptRecursions == 0)4590 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",4591 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));4592 else4593 {4594 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",4595 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,4596 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));4597 4598 if (pVCpu->iem.s.cXcptRecursions >= 4)4599 {4600 #ifdef DEBUG_bird4601 AssertFailed();4602 #endif4603 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));4604 }4605 4606 /*4607 * Evaluate the sequence of recurring events.4608 */4609 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,4610 NULL /* pXcptRaiseInfo */);4611 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)4612 { /* likely */ }4613 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)4614 {4615 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));4616 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4617 u8Vector = X86_XCPT_DF;4618 uErr = 0;4619 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4620 /* VMX nested-guest #DF intercept needs to be checked here. */4621 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4622 {4623 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);4624 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4625 return rcStrict0;4626 }4627 #endif4628 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */4629 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))4630 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);4631 }4632 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)4633 {4634 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));4635 return iemInitiateCpuShutdown(pVCpu);4636 }4637 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)4638 {4639 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. 
*/4640 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));4641 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))4642 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))4643 return VERR_EM_GUEST_CPU_HANG;4644 }4645 else4646 {4647 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",4648 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));4649 return VERR_IEM_IPE_9;4650 }4651 4652 /*4653 * The 'EXT' bit is set when an exception occurs during deliver of an external4654 * event (such as an interrupt or earlier exception)[1]. Privileged software4655 * exception (INT1) also sets the EXT bit[2]. Exceptions generated by software4656 * interrupts and INTO, INT3 instructions, the 'EXT' bit will not be set.4657 *4658 * [1] - Intel spec. 6.13 "Error Code"4659 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".4660 * [3] - Intel Instruction reference for INT n.4661 */4662 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))4663 && (fFlags & IEM_XCPT_FLAGS_ERR)4664 && u8Vector != X86_XCPT_PF4665 && u8Vector != X86_XCPT_DF)4666 {4667 uErr |= X86_TRAP_ERR_EXTERNAL;4668 }4669 }4670 4671 pVCpu->iem.s.cXcptRecursions++;4672 pVCpu->iem.s.uCurXcpt = u8Vector;4673 pVCpu->iem.s.fCurXcpt = fFlags;4674 pVCpu->iem.s.uCurXcptErr = uErr;4675 pVCpu->iem.s.uCurXcptCr2 = uCr2;4676 4677 /*4678 * Extensive logging.4679 */4680 #if defined(LOG_ENABLED) && defined(IN_RING3)4681 if (LogIs3Enabled())4682 {4683 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);4684 char szRegs[4096];4685 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),4686 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"4687 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"4688 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"4689 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"4690 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"4691 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"4692 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"4693 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"4694 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"4695 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"4696 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"4697 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"4698 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"4699 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"4700 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"4701 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"4702 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"4703 " efer=%016VR{efer}\n"4704 " pat=%016VR{pat}\n"4705 " sf_mask=%016VR{sf_mask}\n"4706 "krnl_gs_base=%016VR{krnl_gs_base}\n"4707 " lstar=%016VR{lstar}\n"4708 " star=%016VR{star} cstar=%016VR{cstar}\n"4709 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"4710 );4711 4712 char szInstr[256];4713 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,4714 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,4715 
szInstr, sizeof(szInstr), NULL);
4716 Log3(("%s%s\n", szRegs, szInstr));
4717 }
4718 #endif /* LOG_ENABLED */
4719
4720 /*
4721 * Stats.
4722 */
4723 uint64_t const uTimestamp = ASMReadTSC();
4724 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4725 {
4726 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4727 EMHistoryAddExit(pVCpu,
4728 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4729 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4730 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4731 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4732 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4733 }
4734 else
4735 {
4736 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4737 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4738 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4739 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4740 if (fFlags & IEM_XCPT_FLAGS_ERR)
4741 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4742 if (fFlags & IEM_XCPT_FLAGS_CR2)
4743 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4744 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4745 }
4746
4747 /*
4748 * Hack alert! Convert incoming debug events to silent ones on Intel.
4749 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4750 */
4751 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4752 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4753 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4754 { /* ignore */ }
4755 else
4756 {
4757 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4758 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4759 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4760 | CPUMCTX_DBG_HIT_DRX_SILENT;
4761 }
4762
4763 /*
4764 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4765 * to ensure that a stale TLB or paging cache entry will only cause one
4766 * spurious #PF.
4767 */
4768 if ( u8Vector == X86_XCPT_PF
4769 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4770 IEMTlbInvalidatePage(pVCpu, uCr2);
4771
4772 /*
4773 * Call the mode specific worker function.
4774 */
4775 VBOXSTRICTRC rcStrict;
4776 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4777 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4778 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4779 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4780 else
4781 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4782
4783 /* Flush the prefetch buffer. */
4784 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4785
4786 /*
4787 * Unwind.
4788 */
4789 pVCpu->iem.s.cXcptRecursions--;
4790 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4791 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4792 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4793 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4794 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4795 return rcStrict;
4796 }
4797
4798 #ifdef IEM_WITH_SETJMP
4799 /**
4800 * See iemRaiseXcptOrInt.
Will not return.4801 */4802 DECL_NO_RETURN(void)4803 iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,4804 uint8_t cbInstr,4805 uint8_t u8Vector,4806 uint32_t fFlags,4807 uint16_t uErr,4808 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP4809 {4810 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4811 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));4812 }4813 #endif4814 4815 4816 /** \#DE - 00. */4817 VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT4818 {4819 if (GCMIsInterceptingXcptDE(pVCpu))4820 {4821 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);4822 if (rc == VINF_SUCCESS)4823 {4824 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));4825 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause a instruction restart */4826 }4827 }4828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4829 }4830 4831 4832 #ifdef IEM_WITH_SETJMP4833 /** \#DE - 00. */4834 DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4835 {4836 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4837 }4838 #endif4839 4840 4841 /** \#DB - 01.4842 * @note This automatically clear DR7.GD. */4843 VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT4844 {4845 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */4846 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;4847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);4848 }4849 4850 4851 /** \#BR - 05. */4852 VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT4853 {4854 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4855 }4856 4857 4858 /** \#UD - 06. */4859 VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT4860 {4861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4862 }4863 4864 4865 #ifdef IEM_WITH_SETJMP4866 /** \#UD - 06. */4867 DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4868 {4869 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4870 }4871 #endif4872 4873 4874 /** \#NM - 07. */4875 VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT4876 {4877 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4878 }4879 4880 4881 #ifdef IEM_WITH_SETJMP4882 /** \#NM - 07. */4883 DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4884 {4885 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4886 }4887 #endif4888 4889 4890 /** \#TS(err) - 0a. */4891 VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4892 {4893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4894 }4895 4896 4897 /** \#TS(tr) - 0a. */4898 VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT4899 {4900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4901 pVCpu->cpum.GstCtx.tr.Sel, 0);4902 }4903 4904 4905 /** \#TS(0) - 0a. */4906 VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4907 {4908 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4909 0, 0);4910 }4911 4912 4913 /** \#TS(err) - 0a. 
*/4914 VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4915 {4916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4917 uSel & X86_SEL_MASK_OFF_RPL, 0);4918 }4919 4920 4921 /** \#NP(err) - 0b. */4922 VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4923 {4924 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4925 }4926 4927 4928 /** \#NP(sel) - 0b. */4929 VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4930 {4931 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4934 uSel & ~X86_SEL_RPL, 0);4935 }4936 4937 4938 /** \#SS(seg) - 0c. */4939 VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4940 {4941 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4942 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4944 uSel & ~X86_SEL_RPL, 0);4945 }4946 4947 4948 /** \#SS(err) - 0c. */4949 VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4950 {4951 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",4952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4954 }4955 4956 4957 /** \#GP(n) - 0d. */4958 VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4959 {4960 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4961 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4962 }4963 4964 4965 /** \#GP(0) - 0d. */4966 VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4967 {4968 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4969 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4970 }4971 4972 #ifdef IEM_WITH_SETJMP4973 /** \#GP(0) - 0d. */4974 DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4975 {4976 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4977 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4978 }4979 #endif4980 4981 4982 /** \#GP(sel) - 0d. */4983 VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT4984 {4985 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",4986 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));4987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4988 Sel & ~X86_SEL_RPL, 0);4989 }4990 4991 4992 /** \#GP(0) - 0d. 
*/4993 VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT4994 {4995 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4997 }4998 4999 5000 /** \#GP(sel) - 0d. */5001 VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5002 {5003 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5004 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5005 NOREF(iSegReg); NOREF(fAccess);5006 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5007 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5008 }5009 5010 #ifdef IEM_WITH_SETJMP5011 /** \#GP(sel) - 0d, longjmp. */5012 DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5013 {5014 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5015 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5016 NOREF(iSegReg); NOREF(fAccess);5017 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5018 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5019 }5020 #endif5021 5022 /** \#GP(sel) - 0d. */5023 VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT5024 {5025 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5027 NOREF(Sel);5028 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5029 }5030 5031 #ifdef IEM_WITH_SETJMP5032 /** \#GP(sel) - 0d, longjmp. */5033 DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP5034 {5035 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",5036 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5037 NOREF(Sel);5038 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5039 }5040 #endif5041 5042 5043 /** \#GP(sel) - 0d. */5044 VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5045 {5046 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5047 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5048 NOREF(iSegReg); NOREF(fAccess);5049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5050 }5051 5052 #ifdef IEM_WITH_SETJMP5053 /** \#GP(sel) - 0d, longjmp. */5054 DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5055 {5056 NOREF(iSegReg); NOREF(fAccess);5057 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5058 }5059 #endif5060 5061 5062 /** \#PF(n) - 0e. 
*/5063 VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT5064 {5065 uint16_t uErr;5066 switch (rc)5067 {5068 case VERR_PAGE_NOT_PRESENT:5069 case VERR_PAGE_TABLE_NOT_PRESENT:5070 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:5071 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:5072 uErr = 0;5073 break;5074 5075 case VERR_RESERVED_PAGE_TABLE_BITS:5076 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;5077 break;5078 5079 default:5080 AssertMsgFailed(("%Rrc\n", rc));5081 RT_FALL_THRU();5082 case VERR_ACCESS_DENIED:5083 uErr = X86_TRAP_PF_P;5084 break;5085 }5086 5087 if (IEM_GET_CPL(pVCpu) == 3)5088 uErr |= X86_TRAP_PF_US;5089 5090 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE5091 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)5092 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )5093 uErr |= X86_TRAP_PF_ID;5094 5095 #if 0 /* This is so much non-sense, really. Why was it done like that? */5096 /* Note! RW access callers reporting a WRITE protection fault, will clear5097 the READ flag before calling. So, read-modify-write accesses (RW)5098 can safely be reported as READ faults. */5099 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)5100 uErr |= X86_TRAP_PF_RW;5101 #else5102 if (fAccess & IEM_ACCESS_TYPE_WRITE)5103 {5104 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg5105 /// (regardless of outcome of the comparison in the latter case).5106 //if (!(fAccess & IEM_ACCESS_TYPE_READ))5107 uErr |= X86_TRAP_PF_RW;5108 }5109 #endif5110 5111 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address5112 of the memory operand rather than at the start of it. (Not sure what5113 happens if it crosses a page boundrary.) The current heuristics for5114 this is to report the #PF for the last byte if the access is more than5115 64 bytes. This is probably not correct, but we can work that out later,5116 main objective now is to get FXSAVE to work like for real hardware and5117 make bs3-cpu-basic2 work. */5118 if (cbAccess <= 64)5119 { /* likely*/ }5120 else5121 GCPtrWhere += cbAccess - 1;5122 5123 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,5124 uErr, GCPtrWhere);5125 }5126 5127 #ifdef IEM_WITH_SETJMP5128 /** \#PF(n) - 0e, longjmp. */5129 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,5130 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP5131 {5132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));5133 }5134 #endif5135 5136 5137 /** \#MF(0) - 10. */5138 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT5139 {5140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)5141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5142 5143 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */5144 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);5145 return iemRegUpdateRipAndFinishClearingRF(pVCpu);5146 }5147 5148 #ifdef IEM_WITH_SETJMP5149 /** \#MF(0) - 10, longjmp. */5150 DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5151 {5152 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));5153 }5154 #endif5155 5156 5157 /** \#AC(0) - 11. 
*/5158 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT5159 {5160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5161 }5162 5163 #ifdef IEM_WITH_SETJMP5164 /** \#AC(0) - 11, longjmp. */5165 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5166 {5167 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));5168 }5169 #endif5170 5171 5172 /** \#XF(0)/\#XM(0) - 19. */5173 VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT5174 {5175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5176 }5177 5178 5179 #ifdef IEM_WITH_SETJMP5180 /** \#XF(0)/\#XM(0) - 19s, longjmp. */5181 DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5182 {5183 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));5184 }5185 #endif5186 5187 5188 /** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */5189 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)5190 {5191 NOREF(cbInstr);5192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5193 }5194 5195 5196 /** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */5197 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)5198 {5199 NOREF(cbInstr);5200 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5201 }5202 5203 5204 /** Accessed via IEMOP_RAISE_INVALID_OPCODE. */5205 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)5206 {5207 NOREF(cbInstr);5208 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5209 }5210 5211 5212 /** @} */5213 5214 /** @name Common opcode decoders.5215 * @{5216 */5217 //#include <iprt/mem.h>5218 5219 /**5220 * Used to add extra details about a stub case.5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.5222 */5223 void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT5224 {5225 #if defined(LOG_ENABLED) && defined(IN_RING3)5226 PVM pVM = pVCpu->CTX_SUFF(pVM);5227 char szRegs[4096];5228 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),5229 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"5230 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"5231 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"5232 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"5233 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"5234 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"5235 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"5236 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"5237 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"5238 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"5239 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"5240 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"5241 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"5242 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"5243 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"5244 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"5245 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} 
esp=%08VR{sysenter_esp}}\n"5246 " efer=%016VR{efer}\n"5247 " pat=%016VR{pat}\n"5248 " sf_mask=%016VR{sf_mask}\n"5249 "krnl_gs_base=%016VR{krnl_gs_base}\n"5250 " lstar=%016VR{lstar}\n"5251 " star=%016VR{star} cstar=%016VR{cstar}\n"5252 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"5253 );5254 5255 char szInstr[256];5256 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,5257 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,5258 szInstr, sizeof(szInstr), NULL);5259 5260 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);5261 #else5262 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);5263 #endif5264 }5265 5266 /** @} */5267 5268 5269 5270 2267 /** @name Register Access. 5271 2268 * @{ … … 5418 2415 */ 5419 2416 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS); 5420 }5421 5422 /** @} */5423 5424 5425 /** @name FPU access and helpers.5426 *5427 * @{5428 */5429 5430 /**5431 * Updates the x87.DS and FPUDP registers.5432 *5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.5434 * @param pFpuCtx The FPU context.5435 * @param iEffSeg The effective segment register.5436 * @param GCPtrEff The effective address relative to @a iEffSeg.5437 */5438 DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)5439 {5440 RTSEL sel;5441 switch (iEffSeg)5442 {5443 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;5444 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;5445 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;5446 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;5447 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;5448 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;5449 default:5450 AssertMsgFailed(("%d\n", iEffSeg));5451 sel = pVCpu->cpum.GstCtx.ds.Sel;5452 }5453 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */5454 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))5455 {5456 pFpuCtx->DS = 0;5457 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);5458 }5459 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. 
*/5460 {5461 pFpuCtx->DS = sel;5462 pFpuCtx->FPUDP = GCPtrEff;5463 }5464 else5465 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;5466 }5467 5468 5469 /**5470 * Rotates the stack registers in the push direction.5471 *5472 * @param pFpuCtx The FPU context.5473 * @remarks This is a complete waste of time, but fxsave stores the registers in5474 * stack order.5475 */5476 DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)5477 {5478 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;5479 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;5480 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;5481 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;5482 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;5483 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;5484 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;5485 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;5486 pFpuCtx->aRegs[0].r80 = r80Tmp;5487 }5488 5489 5490 /**5491 * Rotates the stack registers in the pop direction.5492 *5493 * @param pFpuCtx The FPU context.5494 * @remarks This is a complete waste of time, but fxsave stores the registers in5495 * stack order.5496 */5497 DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)5498 {5499 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;5500 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;5501 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;5502 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;5503 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;5504 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;5505 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;5506 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;5507 pFpuCtx->aRegs[7].r80 = r80Tmp;5508 }5509 5510 5511 /**5512 * Updates FSW and pushes a FPU result onto the FPU stack if no pending5513 * exception prevents it.5514 *5515 * @param pVCpu The cross context virtual CPU structure of the calling thread.5516 * @param pResult The FPU operation result to push.5517 * @param pFpuCtx The FPU context.5518 */5519 static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT5520 {5521 /* Update FSW and bail if there are pending exceptions afterwards. */5522 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5523 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5524 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5525 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5526 {5527 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))5528 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",5529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5530 pFpuCtx->FSW = fFsw;5531 return;5532 }5533 5534 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5535 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5536 {5537 /* All is fine, push the actual value. */5538 pFpuCtx->FTW |= RT_BIT(iNewTop);5539 pFpuCtx->aRegs[7].r80 = pResult->r80Result;5540 }5541 else if (pFpuCtx->FCW & X86_FCW_IM)5542 {5543 /* Masked stack overflow, push QNaN. */5544 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5545 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5546 }5547 else5548 {5549 /* Raise stack overflow, don't push anything. 
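   SF together with C1 in the status word identifies this as a stack overflow
   (C1 clear would mean underflow), while ES/B flag the pending unmasked exception.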
*/5550 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5551 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5552 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",5553 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5554 return;5555 }5556 5557 fFsw &= ~X86_FSW_TOP_MASK;5558 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5559 pFpuCtx->FSW = fFsw;5560 5561 iemFpuRotateStackPush(pFpuCtx);5562 RT_NOREF(pVCpu);5563 }5564 5565 5566 /**5567 * Stores a result in a FPU register and updates the FSW and FTW.5568 *5569 * @param pVCpu The cross context virtual CPU structure of the calling thread.5570 * @param pFpuCtx The FPU context.5571 * @param pResult The result to store.5572 * @param iStReg Which FPU register to store it in.5573 */5574 static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT5575 {5576 Assert(iStReg < 8);5577 uint16_t fNewFsw = pFpuCtx->FSW;5578 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;5579 fNewFsw &= ~X86_FSW_C_MASK;5580 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5581 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5582 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5583 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5584 pFpuCtx->FSW = fNewFsw;5585 pFpuCtx->FTW |= RT_BIT(iReg);5586 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;5587 RT_NOREF(pVCpu);5588 }5589 5590 5591 /**5592 * Only updates the FPU status word (FSW) with the result of the current5593 * instruction.5594 *5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.5596 * @param pFpuCtx The FPU context.5597 * @param u16FSW The FSW output of the current instruction.5598 */5599 static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT5600 {5601 uint16_t fNewFsw = pFpuCtx->FSW;5602 fNewFsw &= ~X86_FSW_C_MASK;5603 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;5604 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5605 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5606 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5607 pFpuCtx->FSW = fNewFsw;5608 RT_NOREF(pVCpu);5609 }5610 5611 5612 /**5613 * Pops one item off the FPU stack if no pending exception prevents it.5614 *5615 * @param pFpuCtx The FPU context.5616 */5617 static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT5618 {5619 /* Check pending exceptions. */5620 uint16_t uFSW = pFpuCtx->FSW;5621 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5622 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5623 return;5624 5625 /* TOP--. */5626 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;5627 uFSW &= ~X86_FSW_TOP_MASK;5628 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;5629 pFpuCtx->FSW = uFSW;5630 5631 /* Mark the previous ST0 as empty. */5632 iOldTop >>= X86_FSW_TOP_SHIFT;5633 pFpuCtx->FTW &= ~RT_BIT(iOldTop);5634 5635 /* Rotate the registers. 
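   aRegs[] is kept in ST(i) order, i.e. relative to TOP and matching the fxsave
   image layout, so whenever TOP moves the array contents are rotated to match
   (FTW, by contrast, is indexed by physical register).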
*/5636 iemFpuRotateStackPop(pFpuCtx);5637 }5638 5639 5640 /**5641 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.5642 *5643 * @param pVCpu The cross context virtual CPU structure of the calling thread.5644 * @param pResult The FPU operation result to push.5645 * @param uFpuOpcode The FPU opcode value.5646 */5647 void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5648 {5649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5650 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5651 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5652 }5653 5654 5655 /**5656 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,5657 * and sets FPUDP and FPUDS.5658 *5659 * @param pVCpu The cross context virtual CPU structure of the calling thread.5660 * @param pResult The FPU operation result to push.5661 * @param iEffSeg The effective segment register.5662 * @param GCPtrEff The effective address relative to @a iEffSeg.5663 * @param uFpuOpcode The FPU opcode value.5664 */5665 void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,5666 uint16_t uFpuOpcode) RT_NOEXCEPT5667 {5668 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5669 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5670 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5671 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5672 }5673 5674 5675 /**5676 * Replace ST0 with the first value and push the second onto the FPU stack,5677 * unless a pending exception prevents it.5678 *5679 * @param pVCpu The cross context virtual CPU structure of the calling thread.5680 * @param pResult The FPU operation result to store and push.5681 * @param uFpuOpcode The FPU opcode value.5682 */5683 void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5684 {5685 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5686 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5687 5688 /* Update FSW and bail if there are pending exceptions afterwards. */5689 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5690 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5691 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5692 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5693 {5694 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5695 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",5696 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5697 pFpuCtx->FSW = fFsw;5698 return;5699 }5700 5701 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5702 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5703 {5704 /* All is fine, push the actual value. */5705 pFpuCtx->FTW |= RT_BIT(iNewTop);5706 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;5707 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;5708 }5709 else if (pFpuCtx->FCW & X86_FCW_IM)5710 {5711 /* Masked stack overflow, push QNaN. */5712 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5713 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);5714 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5715 }5716 else5717 {5718 /* Raise stack overflow, don't push anything. 
*/5719 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5720 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5721 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",5722 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5723 return;5724 }5725 5726 fFsw &= ~X86_FSW_TOP_MASK;5727 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5728 pFpuCtx->FSW = fFsw;5729 5730 iemFpuRotateStackPush(pFpuCtx);5731 }5732 5733 5734 /**5735 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5736 * FOP.5737 *5738 * @param pVCpu The cross context virtual CPU structure of the calling thread.5739 * @param pResult The result to store.5740 * @param iStReg Which FPU register to store it in.5741 * @param uFpuOpcode The FPU opcode value.5742 */5743 void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5744 {5745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5746 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5747 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5748 }5749 5750 5751 /**5752 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5753 * FOP, and then pops the stack.5754 *5755 * @param pVCpu The cross context virtual CPU structure of the calling thread.5756 * @param pResult The result to store.5757 * @param iStReg Which FPU register to store it in.5758 * @param uFpuOpcode The FPU opcode value.5759 */5760 void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5761 {5762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5763 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5764 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5765 iemFpuMaybePopOne(pFpuCtx);5766 }5767 5768 5769 /**5770 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5771 * FPUDP, and FPUDS.5772 *5773 * @param pVCpu The cross context virtual CPU structure of the calling thread.5774 * @param pResult The result to store.5775 * @param iStReg Which FPU register to store it in.5776 * @param iEffSeg The effective memory operand selector register.5777 * @param GCPtrEff The effective memory operand offset.5778 * @param uFpuOpcode The FPU opcode value.5779 */5780 void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,5781 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5782 {5783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5784 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5785 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5786 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5787 }5788 5789 5790 /**5791 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5792 * FPUDP, and FPUDS, and then pops the stack.5793 *5794 * @param pVCpu The cross context virtual CPU structure of the calling thread.5795 * @param pResult The result to store.5796 * @param iStReg Which FPU register to store it in.5797 * @param iEffSeg The effective memory operand selector register.5798 * @param GCPtrEff The effective memory operand offset.5799 * @param uFpuOpcode The FPU opcode value.5800 */5801 void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,5802 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5803 {5804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5805 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5806 
iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5807 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5808 iemFpuMaybePopOne(pFpuCtx);5809 }5810 5811 5812 /**5813 * Updates the FOP, FPUIP, and FPUCS. For FNOP.5814 *5815 * @param pVCpu The cross context virtual CPU structure of the calling thread.5816 * @param uFpuOpcode The FPU opcode value.5817 */5818 void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5819 {5820 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5821 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5822 }5823 5824 5825 /**5826 * Updates the FSW, FOP, FPUIP, and FPUCS.5827 *5828 * @param pVCpu The cross context virtual CPU structure of the calling thread.5829 * @param u16FSW The FSW from the current instruction.5830 * @param uFpuOpcode The FPU opcode value.5831 */5832 void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5833 {5834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5835 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5836 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5837 }5838 5839 5840 /**5841 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.5842 *5843 * @param pVCpu The cross context virtual CPU structure of the calling thread.5844 * @param u16FSW The FSW from the current instruction.5845 * @param uFpuOpcode The FPU opcode value.5846 */5847 void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5848 {5849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5850 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5851 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5852 iemFpuMaybePopOne(pFpuCtx);5853 }5854 5855 5856 /**5857 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.5858 *5859 * @param pVCpu The cross context virtual CPU structure of the calling thread.5860 * @param u16FSW The FSW from the current instruction.5861 * @param iEffSeg The effective memory operand selector register.5862 * @param GCPtrEff The effective memory operand offset.5863 * @param uFpuOpcode The FPU opcode value.5864 */5865 void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5866 {5867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5869 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5870 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5871 }5872 5873 5874 /**5875 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.5876 *5877 * @param pVCpu The cross context virtual CPU structure of the calling thread.5878 * @param u16FSW The FSW from the current instruction.5879 * @param uFpuOpcode The FPU opcode value.5880 */5881 void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5882 {5883 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5884 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5885 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5886 iemFpuMaybePopOne(pFpuCtx);5887 iemFpuMaybePopOne(pFpuCtx);5888 }5889 5890 5891 /**5892 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.5893 *5894 * @param pVCpu The cross context virtual CPU structure of the calling thread.5895 * @param u16FSW The FSW from the current instruction.5896 * @param iEffSeg The effective memory operand selector register.5897 * @param GCPtrEff The effective memory operand offset.5898 * @param uFpuOpcode The FPU opcode 
value.5899 */5900 void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,5901 uint16_t uFpuOpcode) RT_NOEXCEPT5902 {5903 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5904 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5905 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5906 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5907 iemFpuMaybePopOne(pFpuCtx);5908 }5909 5910 5911 /**5912 * Worker routine for raising an FPU stack underflow exception.5913 *5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.5915 * @param pFpuCtx The FPU context.5916 * @param iStReg The stack register being accessed.5917 */5918 static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)5919 {5920 Assert(iStReg < 8 || iStReg == UINT8_MAX);5921 if (pFpuCtx->FCW & X86_FCW_IM)5922 {5923 /* Masked underflow. */5924 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5925 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;5926 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;5927 if (iStReg != UINT8_MAX)5928 {5929 pFpuCtx->FTW |= RT_BIT(iReg);5930 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);5931 }5932 }5933 else5934 {5935 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5936 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;5937 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",5938 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5939 }5940 RT_NOREF(pVCpu);5941 }5942 5943 5944 /**5945 * Raises a FPU stack underflow exception.5946 *5947 * @param pVCpu The cross context virtual CPU structure of the calling thread.5948 * @param iStReg The destination register that should be loaded5949 * with QNaN if \#IS is not masked. Specify5950 * UINT8_MAX if none (like for fcom).5951 * @param uFpuOpcode The FPU opcode value.5952 */5953 void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5954 {5955 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5956 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5957 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5958 }5959 5960 5961 void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5962 {5963 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5964 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5965 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5966 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5967 }5968 5969 5970 void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5971 {5972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5973 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5974 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5975 iemFpuMaybePopOne(pFpuCtx);5976 }5977 5978 5979 void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,5980 uint16_t uFpuOpcode) RT_NOEXCEPT5981 {5982 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5983 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5984 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5985 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5986 iemFpuMaybePopOne(pFpuCtx);5987 }5988 5989 5990 void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5991 {5992 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5993 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5994 
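    /* UINT8_MAX: no destination register is loaded with QNaN on the masked path
       (the fcom-style case described for iemFpuStackUnderflow above). */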
iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);5995 iemFpuMaybePopOne(pFpuCtx);5996 iemFpuMaybePopOne(pFpuCtx);5997 }5998 5999 6000 void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6001 {6002 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6003 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6004 6005 if (pFpuCtx->FCW & X86_FCW_IM)6006 {6007 /* Masked overflow - Push QNaN. */6008 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6009 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6010 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6011 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6012 pFpuCtx->FTW |= RT_BIT(iNewTop);6013 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6014 iemFpuRotateStackPush(pFpuCtx);6015 }6016 else6017 {6018 /* Exception pending - don't change TOP or the register stack. */6019 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6020 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6021 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",6022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6023 }6024 }6025 6026 6027 void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6028 {6029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6030 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6031 6032 if (pFpuCtx->FCW & X86_FCW_IM)6033 {6034 /* Masked overflow - Push QNaN. */6035 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6036 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6037 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6038 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6039 pFpuCtx->FTW |= RT_BIT(iNewTop);6040 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);6041 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6042 iemFpuRotateStackPush(pFpuCtx);6043 }6044 else6045 {6046 /* Exception pending - don't change TOP or the register stack. */6047 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6048 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6049 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",6050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6051 }6052 }6053 6054 6055 /**6056 * Worker routine for raising an FPU stack overflow exception on a push.6057 *6058 * @param pVCpu The cross context virtual CPU structure of the calling thread.6059 * @param pFpuCtx The FPU context.6060 */6061 static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT6062 {6063 if (pFpuCtx->FCW & X86_FCW_IM)6064 {6065 /* Masked overflow. */6066 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6067 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6068 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;6069 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6070 pFpuCtx->FTW |= RT_BIT(iNewTop);6071 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6072 iemFpuRotateStackPush(pFpuCtx);6073 }6074 else6075 {6076 /* Exception pending - don't change TOP or the register stack. 
*/6077 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6078 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6079 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",6080 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6081 }6082 RT_NOREF(pVCpu);6083 }6084 6085 6086 /**6087 * Raises a FPU stack overflow exception on a push.6088 *6089 * @param pVCpu The cross context virtual CPU structure of the calling thread.6090 * @param uFpuOpcode The FPU opcode value.6091 */6092 void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6093 {6094 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6095 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6096 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6097 }6098 6099 6100 /**6101 * Raises a FPU stack overflow exception on a push with a memory operand.6102 *6103 * @param pVCpu The cross context virtual CPU structure of the calling thread.6104 * @param iEffSeg The effective memory operand selector register.6105 * @param GCPtrEff The effective memory operand offset.6106 * @param uFpuOpcode The FPU opcode value.6107 */6108 void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT6109 {6110 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6111 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);6112 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6113 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6114 2417 } 6115 2418 … … 8487 4790 * @param uErrorCode The error code associated with the exception. 8488 4791 */ 8489 staticVBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,8490 4792 VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, 4793 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT 8491 4794 { 8492 4795 AssertPtr(pDesc); … … 10369 6672 * @param cbInstr The instruction length (only relevant for 10370 6673 * software interrupts). 6674 * @note x86 specific, but difficult to move due to iemInitDecoder dep. 10371 6675 */ 10372 6676 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2, … … 10481 6785 } 10482 6786 10483 10484 /**10485 * Interface for HM and EM for executing string I/O OUT (write) instructions.10486 *10487 * This API ASSUMES that the caller has already verified that the guest code is10488 * allowed to access the I/O port. (The I/O port is in the DX register in the10489 * guest state.)10490 *10491 * @returns Strict VBox status code.10492 * @param pVCpu The cross context virtual CPU structure.10493 * @param cbValue The size of the I/O port access (1, 2, or 4).10494 * @param enmAddrMode The addressing mode.10495 * @param fRepPrefix Indicates whether a repeat prefix is used10496 * (doesn't matter which for this instruction).10497 * @param cbInstr The instruction length in bytes.10498 * @param iEffSeg The effective segment address.10499 * @param fIoChecked Whether the access to the I/O port has been10500 * checked or not. 
It's typically checked in the10501 * HM scenario.10502 */10503 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10504 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)10505 {10506 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);10507 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10508 10509 /*10510 * State init.10511 */10512 iemInitExec(pVCpu, 0 /*fExecOpts*/);10513 10514 /*10515 * Switch orgy for getting to the right handler.10516 */10517 VBOXSTRICTRC rcStrict;10518 if (fRepPrefix)10519 {10520 switch (enmAddrMode)10521 {10522 case IEMMODE_16BIT:10523 switch (cbValue)10524 {10525 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10526 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10527 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10528 default:10529 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10530 }10531 break;10532 10533 case IEMMODE_32BIT:10534 switch (cbValue)10535 {10536 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10537 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10538 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10539 default:10540 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10541 }10542 break;10543 10544 case IEMMODE_64BIT:10545 switch (cbValue)10546 {10547 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10548 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10549 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10550 default:10551 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10552 }10553 break;10554 10555 default:10556 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10557 }10558 }10559 else10560 {10561 switch (enmAddrMode)10562 {10563 case IEMMODE_16BIT:10564 switch (cbValue)10565 {10566 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10567 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10568 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10569 default:10570 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10571 }10572 break;10573 10574 case IEMMODE_32BIT:10575 switch (cbValue)10576 {10577 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10578 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10579 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10580 default:10581 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10582 }10583 break;10584 10585 case IEMMODE_64BIT:10586 switch (cbValue)10587 {10588 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10589 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10590 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10591 default:10592 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10593 }10594 break;10595 10596 default:10597 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10598 }10599 }10600 10601 if (pVCpu->iem.s.cActiveMappings)10602 iemMemRollback(pVCpu);10603 10604 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10605 }10606 10607 10608 /**10609 * Interface for HM and EM for executing string I/O IN (read) instructions.10610 *10611 * This API ASSUMES that the caller has already verified that the guest code is10612 * allowed to access the I/O port. (The I/O port is in the DX register in the10613 * guest state.)10614 *10615 * @returns Strict VBox status code.10616 * @param pVCpu The cross context virtual CPU structure.10617 * @param cbValue The size of the I/O port access (1, 2, or 4).10618 * @param enmAddrMode The addressing mode.10619 * @param fRepPrefix Indicates whether a repeat prefix is used10620 * (doesn't matter which for this instruction).10621 * @param cbInstr The instruction length in bytes.10622 * @param fIoChecked Whether the access to the I/O port has been10623 * checked or not. It's typically checked in the10624 * HM scenario.10625 */10626 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10627 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)10628 {10629 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10630 10631 /*10632 * State init.10633 */10634 iemInitExec(pVCpu, 0 /*fExecOpts*/);10635 10636 /*10637 * Switch orgy for getting to the right handler.10638 */10639 VBOXSTRICTRC rcStrict;10640 if (fRepPrefix)10641 {10642 switch (enmAddrMode)10643 {10644 case IEMMODE_16BIT:10645 switch (cbValue)10646 {10647 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10648 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10649 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10650 default:10651 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10652 }10653 break;10654 10655 case IEMMODE_32BIT:10656 switch (cbValue)10657 {10658 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10659 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10660 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10661 default:10662 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10663 }10664 break;10665 10666 case IEMMODE_64BIT:10667 switch (cbValue)10668 {10669 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10670 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10671 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10672 default:10673 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10674 }10675 break;10676 10677 default:10678 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10679 }10680 }10681 else10682 {10683 switch (enmAddrMode)10684 {10685 case IEMMODE_16BIT:10686 switch (cbValue)10687 {10688 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10689 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10690 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10691 default:10692 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10693 }10694 break;10695 10696 case IEMMODE_32BIT:10697 switch (cbValue)10698 {10699 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10700 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10701 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10702 default:10703 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10704 }10705 break;10706 10707 case IEMMODE_64BIT:10708 switch (cbValue)10709 {10710 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10711 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10712 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10713 default:10714 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10715 }10716 break;10717 10718 default:10719 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10720 }10721 }10722 10723 if ( pVCpu->iem.s.cActiveMappings == 010724 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))10725 { /* likely */ }10726 else10727 {10728 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));10729 iemMemRollback(pVCpu);10730 }10731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10732 }10733 10734 10735 /**10736 * Interface for rawmode to write execute an OUT instruction.10737 *10738 * @returns Strict VBox status code.10739 * @param pVCpu The cross context virtual CPU structure.10740 * @param cbInstr The instruction length in bytes.10741 * @param u16Port The port to read.10742 * @param fImm Whether the port is specified using an immediate operand or10743 * using the implicit DX register.10744 * @param cbReg The register size.10745 *10746 * @remarks In ring-0 not all of the state needs to be synced in.10747 */10748 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10749 {10750 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10751 Assert(cbReg <= 4 && cbReg != 3);10752 10753 iemInitExec(pVCpu, 0 /*fExecOpts*/);10754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,10755 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10756 Assert(!pVCpu->iem.s.cActiveMappings);10757 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10758 }10759 10760 10761 /**10762 * Interface for rawmode to write execute an IN instruction.10763 *10764 * @returns Strict VBox status code.10765 * @param pVCpu The cross context virtual CPU structure.10766 * @param cbInstr The instruction length in bytes.10767 * @param u16Port The port to read.10768 * @param fImm Whether the port is specified using an immediate operand or10769 * using the implicit DX.10770 * @param cbReg The register size.10771 */10772 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10773 {10774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10775 Assert(cbReg <= 4 && cbReg != 3);10776 10777 iemInitExec(pVCpu, 0 /*fExecOpts*/);10778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,10779 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10780 Assert(!pVCpu->iem.s.cActiveMappings);10781 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10782 }10783 10784 10785 /**10786 * Interface for HM and EM to 
write to a CRx register.10787 *10788 * @returns Strict VBox status code.10789 * @param pVCpu The cross context virtual CPU structure.10790 * @param cbInstr The instruction length in bytes.10791 * @param iCrReg The control register number (destination).10792 * @param iGReg The general purpose register number (source).10793 *10794 * @remarks In ring-0 not all of the state needs to be synced in.10795 */10796 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)10797 {10798 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10799 Assert(iCrReg < 16);10800 Assert(iGReg < 16);10801 10802 iemInitExec(pVCpu, 0 /*fExecOpts*/);10803 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);10804 Assert(!pVCpu->iem.s.cActiveMappings);10805 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10806 }10807 10808 10809 /**10810 * Interface for HM and EM to read from a CRx register.10811 *10812 * @returns Strict VBox status code.10813 * @param pVCpu The cross context virtual CPU structure.10814 * @param cbInstr The instruction length in bytes.10815 * @param iGReg The general purpose register number (destination).10816 * @param iCrReg The control register number (source).10817 *10818 * @remarks In ring-0 not all of the state needs to be synced in.10819 */10820 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)10821 {10822 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10823 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR410824 | CPUMCTX_EXTRN_APIC_TPR);10825 Assert(iCrReg < 16);10826 Assert(iGReg < 16);10827 10828 iemInitExec(pVCpu, 0 /*fExecOpts*/);10829 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);10830 Assert(!pVCpu->iem.s.cActiveMappings);10831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10832 }10833 10834 10835 /**10836 * Interface for HM and EM to write to a DRx register.10837 *10838 * @returns Strict VBox status code.10839 * @param pVCpu The cross context virtual CPU structure.10840 * @param cbInstr The instruction length in bytes.10841 * @param iDrReg The debug register number (destination).10842 * @param iGReg The general purpose register number (source).10843 *10844 * @remarks In ring-0 not all of the state needs to be synced in.10845 */10846 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)10847 {10848 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10849 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10850 Assert(iDrReg < 8);10851 Assert(iGReg < 16);10852 10853 iemInitExec(pVCpu, 0 /*fExecOpts*/);10854 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);10855 Assert(!pVCpu->iem.s.cActiveMappings);10856 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10857 }10858 10859 10860 /**10861 * Interface for HM and EM to read from a DRx register.10862 *10863 * @returns Strict VBox status code.10864 * @param pVCpu The cross context virtual CPU structure.10865 * @param cbInstr The instruction length in bytes.10866 * @param iGReg The general purpose register number (destination).10867 * @param iDrReg The debug register number (source).10868 *10869 * @remarks In ring-0 not all of the state needs to be synced in.10870 */10871 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t 
cbInstr, uint8_t iGReg, uint8_t iDrReg)10872 {10873 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10874 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10875 Assert(iDrReg < 8);10876 Assert(iGReg < 16);10877 10878 iemInitExec(pVCpu, 0 /*fExecOpts*/);10879 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);10880 Assert(!pVCpu->iem.s.cActiveMappings);10881 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10882 }10883 10884 10885 /**10886 * Interface for HM and EM to clear the CR0[TS] bit.10887 *10888 * @returns Strict VBox status code.10889 * @param pVCpu The cross context virtual CPU structure.10890 * @param cbInstr The instruction length in bytes.10891 *10892 * @remarks In ring-0 not all of the state needs to be synced in.10893 */10894 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)10895 {10896 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10897 10898 iemInitExec(pVCpu, 0 /*fExecOpts*/);10899 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);10900 Assert(!pVCpu->iem.s.cActiveMappings);10901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10902 }10903 10904 10905 /**10906 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).10907 *10908 * @returns Strict VBox status code.10909 * @param pVCpu The cross context virtual CPU structure.10910 * @param cbInstr The instruction length in bytes.10911 * @param uValue The value to load into CR0.10912 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a10913 * memory operand. Otherwise pass NIL_RTGCPTR.10914 *10915 * @remarks In ring-0 not all of the state needs to be synced in.10916 */10917 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)10918 {10919 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10920 10921 iemInitExec(pVCpu, 0 /*fExecOpts*/);10922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);10923 Assert(!pVCpu->iem.s.cActiveMappings);10924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10925 }10926 10927 10928 /**10929 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).10930 *10931 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.10932 *10933 * @returns Strict VBox status code.10934 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10935 * @param cbInstr The instruction length in bytes.10936 * @remarks In ring-0 not all of the state needs to be synced in.10937 * @thread EMT(pVCpu)10938 */10939 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)10940 {10941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10942 10943 iemInitExec(pVCpu, 0 /*fExecOpts*/);10944 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);10945 Assert(!pVCpu->iem.s.cActiveMappings);10946 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10947 }10948 10949 10950 /**10951 * Interface for HM and EM to emulate the WBINVD instruction.10952 *10953 * @returns Strict VBox status code.10954 * @param pVCpu The cross context virtual CPU structure.10955 * @param cbInstr The instruction length in bytes.10956 *10957 * @remarks In ring-0 not all of the state needs to be synced in.10958 */10959 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)10960 {10961 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10962 10963 iemInitExec(pVCpu, 0 /*fExecOpts*/);10964 
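    /* Same three-step shape as the other IEMExecDecoded* wrappers above: set up a
       minimal execution state, run the C implementation, then clean up and massage
       the status code for the caller. */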
VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);10965 Assert(!pVCpu->iem.s.cActiveMappings);10966 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10967 }10968 10969 10970 /**10971 * Interface for HM and EM to emulate the INVD instruction.10972 *10973 * @returns Strict VBox status code.10974 * @param pVCpu The cross context virtual CPU structure.10975 * @param cbInstr The instruction length in bytes.10976 *10977 * @remarks In ring-0 not all of the state needs to be synced in.10978 */10979 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)10980 {10981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10982 10983 iemInitExec(pVCpu, 0 /*fExecOpts*/);10984 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);10985 Assert(!pVCpu->iem.s.cActiveMappings);10986 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10987 }10988 10989 10990 /**10991 * Interface for HM and EM to emulate the INVLPG instruction.10992 *10993 * @returns Strict VBox status code.10994 * @retval VINF_PGM_SYNC_CR310995 *10996 * @param pVCpu The cross context virtual CPU structure.10997 * @param cbInstr The instruction length in bytes.10998 * @param GCPtrPage The effective address of the page to invalidate.10999 *11000 * @remarks In ring-0 not all of the state needs to be synced in.11001 */11002 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)11003 {11004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11005 11006 iemInitExec(pVCpu, 0 /*fExecOpts*/);11007 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);11008 Assert(!pVCpu->iem.s.cActiveMappings);11009 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11010 }11011 11012 11013 /**11014 * Interface for HM and EM to emulate the INVPCID instruction.11015 *11016 * @returns Strict VBox status code.11017 * @retval VINF_PGM_SYNC_CR311018 *11019 * @param pVCpu The cross context virtual CPU structure.11020 * @param cbInstr The instruction length in bytes.11021 * @param iEffSeg The effective segment register.11022 * @param GCPtrDesc The effective address of the INVPCID descriptor.11023 * @param uType The invalidation type.11024 *11025 * @remarks In ring-0 not all of the state needs to be synced in.11026 */11027 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,11028 uint64_t uType)11029 {11030 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);11031 11032 iemInitExec(pVCpu, 0 /*fExecOpts*/);11033 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);11034 Assert(!pVCpu->iem.s.cActiveMappings);11035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11036 }11037 11038 11039 /**11040 * Interface for HM and EM to emulate the CPUID instruction.11041 *11042 * @returns Strict VBox status code.11043 *11044 * @param pVCpu The cross context virtual CPU structure.11045 * @param cbInstr The instruction length in bytes.11046 *11047 * @remarks Not all of the state needs to be synced in, the usual pluss RAX and RCX.11048 */11049 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)11050 {11051 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);11053 11054 iemInitExec(pVCpu, 0 /*fExecOpts*/);11055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);11056 Assert(!pVCpu->iem.s.cActiveMappings);11057 return 
iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11058 }
11059
11060
11061 /**
11062 * Interface for HM and EM to emulate the RDPMC instruction.
11063 *
11064 * @returns Strict VBox status code.
11065 *
11066 * @param pVCpu The cross context virtual CPU structure.
11067 * @param cbInstr The instruction length in bytes.
11068 *
11069 * @remarks Not all of the state needs to be synced in.
11070 */
11071 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11072 {
11073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11075
11076 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11078 Assert(!pVCpu->iem.s.cActiveMappings);
11079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11080 }
11081
11082
11083 /**
11084 * Interface for HM and EM to emulate the RDTSC instruction.
11085 *
11086 * @returns Strict VBox status code.
11087 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11088 *
11089 * @param pVCpu The cross context virtual CPU structure.
11090 * @param cbInstr The instruction length in bytes.
11091 *
11092 * @remarks Not all of the state needs to be synced in.
11093 */
11094 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11095 {
11096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11097 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11098
11099 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11100 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11101 Assert(!pVCpu->iem.s.cActiveMappings);
11102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11103 }
11104
11105
11106 /**
11107 * Interface for HM and EM to emulate the RDTSCP instruction.
11108 *
11109 * @returns Strict VBox status code.
11110 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11111 *
11112 * @param pVCpu The cross context virtual CPU structure.
11113 * @param cbInstr The instruction length in bytes.
11114 *
11115 * @remarks Not all of the state needs to be synced in. Recommended
11116 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.
11117 */
11118 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11119 {
11120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11122
11123 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11124 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11125 Assert(!pVCpu->iem.s.cActiveMappings);
11126 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11127 }
11128
11129
11130 /**
11131 * Interface for HM and EM to emulate the RDMSR instruction.
11132 *
11133 * @returns Strict VBox status code.
11134 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11135 *
11136 * @param pVCpu The cross context virtual CPU structure.
11137 * @param cbInstr The instruction length in bytes.
11138 *
11139 * @remarks Not all of the state needs to be synced in. Requires RCX and
11140 * (currently) all MSRs.
11141 */
11142 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11143 {
11144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11145 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11146
11147 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11149 Assert(!pVCpu->iem.s.cActiveMappings);
11150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11151 }
11152
11153
11154 /**
11155 * Interface for HM and EM to emulate the WRMSR instruction.
11156 *
11157 * @returns Strict VBox status code.
11158 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11159 *
11160 * @param pVCpu The cross context virtual CPU structure.
11161 * @param cbInstr The instruction length in bytes.
11162 *
11163 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11164 * and (currently) all MSRs.
11165 */
11166 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11167 {
11168 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11169 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11170 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11171
11172 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11174 Assert(!pVCpu->iem.s.cActiveMappings);
11175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11176 }
11177
11178
11179 /**
11180 * Interface for HM and EM to emulate the MONITOR instruction.
11181 *
11182 * @returns Strict VBox status code.
11183 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11184 *
11185 * @param pVCpu The cross context virtual CPU structure.
11186 * @param cbInstr The instruction length in bytes.
11187 *
11188 * @remarks Not all of the state needs to be synced in.
11189 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11190 * are used.
11191 */
11192 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11193 {
11194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11195 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11196
11197 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11199 Assert(!pVCpu->iem.s.cActiveMappings);
11200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11201 }
11202
11203
11204 /**
11205 * Interface for HM and EM to emulate the MWAIT instruction.
11206 *
11207 * @returns Strict VBox status code.
11208 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11209 *
11210 * @param pVCpu The cross context virtual CPU structure.
11211 * @param cbInstr The instruction length in bytes.
11212 *
11213 * @remarks Not all of the state needs to be synced in.
11214 */
11215 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11216 {
11217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11218 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11219
11220 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11221 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11222 Assert(!pVCpu->iem.s.cActiveMappings);
11223 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11224 }
11225
11226
11227 /**
11228 * Interface for HM and EM to emulate the HLT instruction.
11229 *
11230 * @returns Strict VBox status code.
11231 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11232 *
11233 * @param pVCpu The cross context virtual CPU structure.
11234 * @param cbInstr The instruction length in bytes.
11235 *
11236 * @remarks Not all of the state needs to be synced in.
11237 */
11238 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11239 {
11240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11241
11242 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11244 Assert(!pVCpu->iem.s.cActiveMappings);
11245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11246 }
11247
11248
11249 /**
11250 * Checks if IEM is in the process of delivering an event (interrupt or
11251 * exception).
11252 *
11253 * @returns true if we're in the process of raising an interrupt or exception,
11254 * false otherwise.
11255 * @param pVCpu The cross context virtual CPU structure.
11256 * @param puVector Where to store the vector associated with the
11257 * currently delivered event, optional.
11258 * @param pfFlags Where to store the event delivery flags (see
11259 * IEM_XCPT_FLAGS_XXX), optional.
11260 * @param puErr Where to store the error code associated with the
11261 * event, optional.
11262 * @param puCr2 Where to store the CR2 associated with the event,
11263 * optional.
11264 * @remarks The caller should check the flags to determine if the error code and
11265 * CR2 are valid for the event.
11266 */
11267 VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11268 {
11269 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11270 if (fRaisingXcpt)
11271 {
11272 if (puVector)
11273 *puVector = pVCpu->iem.s.uCurXcpt;
11274 if (pfFlags)
11275 *pfFlags = pVCpu->iem.s.fCurXcpt;
11276 if (puErr)
11277 *puErr = pVCpu->iem.s.uCurXcptErr;
11278 if (puCr2)
11279 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11280 }
11281 return fRaisingXcpt;
11282 }
11283
11284 6787 #ifdef IN_RING3
11285 6788
-
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllExec-x86.cpp
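The new file keeps only the decoded-instruction execution interface: the IEMExecDecoded* wrappers removed from IEMAll.cpp above, which HM and EM call once a VM exit has already identified the instruction. Every wrapper follows the same shape: assert the instruction length and the imported CPUMCTX_EXTRN_* bits, run the matching iemCImpl_* worker between iemInitExec() and iemUninitExecAndFiddleStatusAndMaybeReenter(), and assert that no mappings were left active. The caller side then looks roughly as follows; this is a minimal sketch assuming the usual VMM headers, and the handler name is hypothetical rather than an actual HM exit handler:

    /* Hypothetical exit handler calling the decoded-RDTSC interface; the real
       HM handlers sync more guest context than is shown here. */
    static VBOXSTRICTRC exampleExitRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
    {
        /* Let IEM emulate the already-decoded instruction; on success it also
           advances the guest RIP. */
        VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, cbInstr);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS; /* the exception was delivered to the guest */
        return rcStrict;
    }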
r108195 r108220 1 1 /* $Id$ */ 2 2 /** @file 3 * IEM - Interpreted Execution Manager - All Contexts.3 * IEM - Interpreted Execution Manager - x86 target, decoded instruction execution. 4 4 */ 5 5 … … 25 25 * SPDX-License-Identifier: GPL-3.0-only 26 26 */ 27 28 29 /** @page pg_iem IEM - Interpreted Execution Manager30 *31 * The interpreted exeuction manager (IEM) is for executing short guest code32 * sequences that are causing too many exits / virtualization traps. It will33 * also be used to interpret single instructions, thus replacing the selective34 * interpreters in EM and IOM.35 *36 * Design goals:37 * - Relatively small footprint, although we favour speed and correctness38 * over size.39 * - Reasonably fast.40 * - Correctly handle lock prefixed instructions.41 * - Complete instruction set - eventually.42 * - Refactorable into a recompiler, maybe.43 * - Replace EMInterpret*.44 *45 * Using the existing disassembler has been considered, however this is thought46 * to conflict with speed as the disassembler chews things a bit too much while47 * leaving us with a somewhat complicated state to interpret afterwards.48 *49 *50 * The current code is very much work in progress. You've been warned!51 *52 *53 * @section sec_iem_fpu_instr FPU Instructions54 *55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the56 * same or equivalent instructions on the host FPU. To make life easy, we also57 * let the FPU prioritize the unmasked exceptions for us. This however, only58 * works reliably when CR0.NE is set, i.e. when using \#MF instead the IRQ 1359 * for FPU exception delivery, because with CR0.NE=0 there is a window where we60 * can trigger spurious FPU exceptions.61 *62 * The guest FPU state is not loaded into the host CPU and kept there till we63 * leave IEM because the calling conventions have declared an all year open64 * season on much of the FPU state. For instance an innocent looking call to65 * memcpy might end up using a whole bunch of XMM or MM registers if the66 * particular implementation finds it worthwhile.67 *68 *69 * @section sec_iem_logging Logging70 *71 * The IEM code uses the \"IEM\" log group for the main logging. 
The different72 * logging levels/flags are generally used for the following purposes:73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.74 * - Flow (LogFlow) : Basic enter/exit IEM state info.75 * - Level 2 (Log2) : ?76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.78 * - Level 5 (Log5) : Decoding details.79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.80 * - Level 7 (Log7) : iret++ execution logging.81 * - Level 8 (Log8) :82 * - Level 9 (Log9) :83 * - Level 10 (Log10): TLBs.84 * - Level 11 (Log11): Unmasked FPU exceptions.85 *86 * The \"IEM_MEM\" log group covers most of memory related details logging,87 * except for errors and exceptions:88 * - Level 1 (Log) : Reads.89 * - Level 2 (Log2) : Read fallbacks.90 * - Level 3 (Log3) : MemMap read.91 * - Level 4 (Log4) : MemMap read fallbacks.92 * - Level 5 (Log5) : Writes93 * - Level 6 (Log6) : Write fallbacks.94 * - Level 7 (Log7) : MemMap writes and read-writes.95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.96 * - Level 9 (Log9) : Stack reads.97 * - Level 10 (Log10): Stack read fallbacks.98 * - Level 11 (Log11): Stack writes.99 * - Level 12 (Log12): Stack write fallbacks.100 * - Flow (LogFlow) :101 *102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:103 * - Level 1 (Log) : Errors and other major events.104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)105 * - Level 2 (Log2) : VM exits.106 *107 * The syscall logging level assignments:108 * - Level 1: DOS and BIOS.109 * - Level 2: Windows 3.x110 * - Level 3: Linux.111 */112 113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */114 #ifdef _MSC_VER115 # pragma warning(disable:4505)116 #endif117 27 118 28 … … 127 37 #include <VBox/vmm/iem.h> 128 38 #include <VBox/vmm/cpum.h> 129 #include <VBox/vmm/pdmapic.h> 130 #include <VBox/vmm/pdm.h> 131 #include <VBox/vmm/pgm.h> 39 #include <VBox/vmm/dbgf.h> 132 40 #include <VBox/vmm/iom.h> 133 #include <VBox/vmm/em.h>134 #include <VBox/vmm/hm.h>135 #include <VBox/vmm/nem.h>136 41 #include <VBox/vmm/gcm.h> 137 42 #include <VBox/vmm/gim.h> 138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM139 # include <VBox/vmm/em.h>140 # include <VBox/vmm/hm_svm.h>141 #endif142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX143 # include <VBox/vmm/hmvmxinline.h>144 #endif145 #include <VBox/vmm/tm.h>146 #include <VBox/vmm/dbgf.h>147 #include <VBox/vmm/dbgftrace.h>148 43 #include "IEMInternal.h" 149 44 #include <VBox/vmm/vmcc.h> 150 45 #include <VBox/log.h> 151 46 #include <VBox/err.h> 152 #include <VBox/param.h>153 #include <VBox/dis.h>154 #include <iprt/asm-math.h>155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)156 # include <iprt/asm-amd64-x86.h>157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)158 # include <iprt/asm-arm.h>159 #endif160 47 #include <iprt/assert.h> 161 48 #include <iprt/string.h> … … 163 50 164 51 #include "IEMInline.h" 165 166 167 /*********************************************************************************************************************************168 * Structures and Typedefs *169 *********************************************************************************************************************************/170 /**171 * CPU exception classes.172 */173 typedef enum IEMXCPTCLASS174 {175 IEMXCPTCLASS_BENIGN,176 IEMXCPTCLASS_CONTRIBUTORY,177 IEMXCPTCLASS_PAGE_FAULT,178 IEMXCPTCLASS_DOUBLE_FAULT179 } IEMXCPTCLASS;180 181 182 
/*********************************************************************************************************************************183 * Global Variables *184 *********************************************************************************************************************************/185 #if defined(IEM_LOG_MEMORY_WRITES)186 /** What IEM just wrote. */187 uint8_t g_abIemWrote[256];188 /** How much IEM just wrote. */189 size_t g_cbIemWrote;190 #endif191 192 193 /*********************************************************************************************************************************194 * Internal Functions *195 *********************************************************************************************************************************/196 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,197 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;198 199 200 /**201 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code202 * path.203 *204 * This will also invalidate TLB entries for any pages with active data205 * breakpoints on them.206 *207 * @returns IEM_F_BRK_PENDING_XXX or zero.208 * @param pVCpu The cross context virtual CPU structure of the209 * calling thread.210 *211 * @note Don't call directly, use iemCalcExecDbgFlags instead.212 */213 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)214 {215 uint32_t fExec = 0;216 217 /*218 * Helper for invalidate the data TLB for breakpoint addresses.219 *220 * This is to make sure any access to the page will always trigger a TLB221 * load for as long as the breakpoint is enabled.222 */223 #ifdef IEM_WITH_DATA_TLB224 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \225 RTGCPTR uTagNoRev = (a_uValue); \226 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \227 /** @todo do large page accounting */ \228 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \229 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \230 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \231 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \232 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \233 } while (0)234 #else235 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)236 #endif237 238 /*239 * Process guest breakpoints.240 */241 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \242 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \243 { \244 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \245 { \246 case X86_DR7_RW_EO: \247 fExec |= IEM_F_PENDING_BRK_INSTR; \248 break; \249 case X86_DR7_RW_WO: \250 case X86_DR7_RW_RW: \251 fExec |= IEM_F_PENDING_BRK_DATA; \252 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \253 break; \254 case X86_DR7_RW_IO: \255 fExec |= IEM_F_PENDING_BRK_X86_IO; \256 break; \257 } \258 } \259 } while (0)260 261 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];262 if (fGstDr7 & X86_DR7_ENABLED_MASK)263 {264 /** @todo extract more details here to simplify matching later. 
*/265 #ifdef IEM_WITH_DATA_TLB266 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);267 #endif268 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);269 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);270 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);271 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);272 }273 274 /*275 * Process hypervisor breakpoints.276 */277 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);278 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);279 if (fHyperDr7 & X86_DR7_ENABLED_MASK)280 {281 /** @todo extract more details here to simplify matching later. */282 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));283 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));284 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));285 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));286 }287 288 return fExec;289 }290 291 292 /**293 * Initializes the decoder state.294 *295 * iemReInitDecoder is mostly a copy of this function.296 *297 * @param pVCpu The cross context virtual CPU structure of the298 * calling thread.299 * @param fExecOpts Optional execution flags:300 * - IEM_F_BYPASS_HANDLERS301 * - IEM_F_X86_DISREGARD_LOCK302 */303 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)304 {305 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);306 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));315 316 /* Execution state: */317 uint32_t fExec;318 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;319 320 /* Decoder state: */321 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */322 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;323 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)324 {325 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... 
*/326 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;327 }328 else329 {330 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;331 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;332 }333 pVCpu->iem.s.fPrefixes = 0;334 pVCpu->iem.s.uRexReg = 0;335 pVCpu->iem.s.uRexB = 0;336 pVCpu->iem.s.uRexIndex = 0;337 pVCpu->iem.s.idxPrefix = 0;338 pVCpu->iem.s.uVex3rdReg = 0;339 pVCpu->iem.s.uVexLength = 0;340 pVCpu->iem.s.fEvexStuff = 0;341 pVCpu->iem.s.iEffSeg = X86_SREG_DS;342 #ifdef IEM_WITH_CODE_TLB343 pVCpu->iem.s.pbInstrBuf = NULL;344 pVCpu->iem.s.offInstrNextByte = 0;345 pVCpu->iem.s.offCurInstrStart = 0;346 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF347 pVCpu->iem.s.offOpcode = 0;348 # endif349 # ifdef VBOX_STRICT350 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;351 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;352 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;353 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);354 # endif355 #else356 pVCpu->iem.s.offOpcode = 0;357 pVCpu->iem.s.cbOpcode = 0;358 #endif359 pVCpu->iem.s.offModRm = 0;360 pVCpu->iem.s.cActiveMappings = 0;361 pVCpu->iem.s.iNextMapping = 0;362 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;363 364 #ifdef DBGFTRACE_ENABLED365 switch (IEM_GET_CPU_MODE(pVCpu))366 {367 case IEMMODE_64BIT:368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);369 break;370 case IEMMODE_32BIT:371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);372 break;373 case IEMMODE_16BIT:374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);375 break;376 }377 #endif378 }379 380 381 /**382 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.383 *384 * This is mostly a copy of iemInitDecoder.385 *386 * @param pVCpu The cross context virtual CPU structure of the calling EMT.387 */388 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)389 {390 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));399 400 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */401 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),402 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));403 404 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);405 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */406 pVCpu->iem.s.enmEffAddrMode = enmMode;407 if (enmMode != IEMMODE_64BIT)408 {409 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... 
*/410 pVCpu->iem.s.enmEffOpSize = enmMode;411 }412 else413 {414 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;415 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;416 }417 pVCpu->iem.s.fPrefixes = 0;418 pVCpu->iem.s.uRexReg = 0;419 pVCpu->iem.s.uRexB = 0;420 pVCpu->iem.s.uRexIndex = 0;421 pVCpu->iem.s.idxPrefix = 0;422 pVCpu->iem.s.uVex3rdReg = 0;423 pVCpu->iem.s.uVexLength = 0;424 pVCpu->iem.s.fEvexStuff = 0;425 pVCpu->iem.s.iEffSeg = X86_SREG_DS;426 #ifdef IEM_WITH_CODE_TLB427 if (pVCpu->iem.s.pbInstrBuf)428 {429 uint64_t off = (enmMode == IEMMODE_64BIT430 ? pVCpu->cpum.GstCtx.rip431 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)432 - pVCpu->iem.s.uInstrBufPc;433 if (off < pVCpu->iem.s.cbInstrBufTotal)434 {435 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;436 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;437 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)438 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;439 else440 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;441 }442 else443 {444 pVCpu->iem.s.pbInstrBuf = NULL;445 pVCpu->iem.s.offInstrNextByte = 0;446 pVCpu->iem.s.offCurInstrStart = 0;447 pVCpu->iem.s.cbInstrBuf = 0;448 pVCpu->iem.s.cbInstrBufTotal = 0;449 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;450 }451 }452 else453 {454 pVCpu->iem.s.offInstrNextByte = 0;455 pVCpu->iem.s.offCurInstrStart = 0;456 pVCpu->iem.s.cbInstrBuf = 0;457 pVCpu->iem.s.cbInstrBufTotal = 0;458 # ifdef VBOX_STRICT459 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;460 # endif461 }462 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF463 pVCpu->iem.s.offOpcode = 0;464 # endif465 #else /* !IEM_WITH_CODE_TLB */466 pVCpu->iem.s.cbOpcode = 0;467 pVCpu->iem.s.offOpcode = 0;468 #endif /* !IEM_WITH_CODE_TLB */469 pVCpu->iem.s.offModRm = 0;470 Assert(pVCpu->iem.s.cActiveMappings == 0);471 pVCpu->iem.s.iNextMapping = 0;472 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);473 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));474 475 #ifdef DBGFTRACE_ENABLED476 switch (enmMode)477 {478 case IEMMODE_64BIT:479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);480 break;481 case IEMMODE_32BIT:482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);483 break;484 case IEMMODE_16BIT:485 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);486 break;487 }488 #endif489 }490 491 492 493 /**494 * Prefetch opcodes the first time when starting executing.495 *496 * @returns Strict VBox status code.497 * @param pVCpu The cross context virtual CPU structure of the498 * calling thread.499 * @param fExecOpts Optional execution flags:500 * - IEM_F_BYPASS_HANDLERS501 * - IEM_F_X86_DISREGARD_LOCK502 */503 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT504 {505 iemInitDecoder(pVCpu, fExecOpts);506 507 #ifndef IEM_WITH_CODE_TLB508 /*509 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.510 *511 * First translate CS:rIP to a physical address.512 *513 * Note! 
The iemOpcodeFetchMoreBytes code depends on this here code to fetch514 * all relevant bytes from the first page, as it ASSUMES it's only ever515 * called for dealing with CS.LIM, page crossing and instructions that516 * are too long.517 */518 uint32_t cbToTryRead;519 RTGCPTR GCPtrPC;520 if (IEM_IS_64BIT_CODE(pVCpu))521 {522 cbToTryRead = GUEST_PAGE_SIZE;523 GCPtrPC = pVCpu->cpum.GstCtx.rip;524 if (IEM_IS_CANONICAL(GCPtrPC))525 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);526 else527 return iemRaiseGeneralProtectionFault0(pVCpu);528 }529 else530 {531 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;532 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));533 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)534 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;535 else536 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);537 if (cbToTryRead) { /* likely */ }538 else /* overflowed */539 {540 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);541 cbToTryRead = UINT32_MAX;542 }543 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;544 Assert(GCPtrPC <= UINT32_MAX);545 }546 547 PGMPTWALKFAST WalkFast;548 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,549 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,550 &WalkFast);551 if (RT_SUCCESS(rc))552 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);553 else554 {555 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));556 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT557 /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't558 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */559 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)560 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);561 # endif562 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);563 }564 #if 0565 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }566 else567 {568 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));569 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT570 /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/571 # error completely wrong572 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)573 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);574 # endif575 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);576 }577 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }578 else579 {580 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));581 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT582 /** @todo this is completely wrong for EPT. 
WalkFast.fFailed is always zero here!*/583 # error completely wrong.584 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)585 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);586 # endif587 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);588 }589 #else590 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);591 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));592 #endif593 RTGCPHYS const GCPhys = WalkFast.GCPhys;594 595 /*596 * Read the bytes at this address.597 */598 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);599 if (cbToTryRead > cbLeftOnPage)600 cbToTryRead = cbLeftOnPage;601 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))602 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);603 604 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))605 {606 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))608 { /* likely */ }609 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))610 {611 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",612 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));613 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);614 }615 else616 {617 Log((RT_SUCCESS(rcStrict)618 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"619 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",620 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));621 return rcStrict;622 }623 }624 else625 {626 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);627 if (RT_SUCCESS(rc))628 { /* likely */ }629 else630 {631 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",632 GCPtrPC, GCPhys, rc, cbToTryRead));633 return rc;634 }635 }636 pVCpu->iem.s.cbOpcode = cbToTryRead;637 #endif /* !IEM_WITH_CODE_TLB */638 return VINF_SUCCESS;639 }640 641 642 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)643 /**644 * Helper for doing large page accounting at TLB load time.645 */646 template<bool const a_fGlobal>647 DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)648 {649 if (a_fGlobal)650 pTlb->cTlbGlobalLargePageCurLoads++;651 else652 pTlb->cTlbNonGlobalLargePageCurLoads++;653 654 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP655 RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;656 ASMBitSet(pTlb->bmLargePage, idxBit);657 # endif658 659 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);660 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;661 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal662 ? 
&pTlb->GlobalLargePageRange663 : &pTlb->NonGlobalLargePageRange;664 uTagNoRev &= ~(RTGCPTR)fMask;665 if (uTagNoRev < pRange->uFirstTag)666 pRange->uFirstTag = uTagNoRev;667 668 uTagNoRev |= fMask;669 if (uTagNoRev > pRange->uLastTag)670 pRange->uLastTag = uTagNoRev;671 672 RT_NOREF_PV(pVCpu);673 }674 #endif675 676 677 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)678 /**679 * Worker for iemTlbInvalidateAll.680 */681 template<bool a_fGlobal>682 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)683 {684 if (!a_fGlobal)685 pTlb->cTlsFlushes++;686 else687 pTlb->cTlsGlobalFlushes++;688 689 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;690 if (RT_LIKELY(pTlb->uTlbRevision != 0))691 { /* very likely */ }692 else693 {694 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;695 pTlb->cTlbRevisionRollovers++;696 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;697 while (i-- > 0)698 pTlb->aEntries[i * 2].uTag = 0;699 }700 701 pTlb->cTlbNonGlobalLargePageCurLoads = 0;702 pTlb->NonGlobalLargePageRange.uLastTag = 0;703 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;704 705 if (a_fGlobal)706 {707 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;708 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))709 { /* very likely */ }710 else711 {712 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;713 pTlb->cTlbRevisionRollovers++;714 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;715 while (i-- > 0)716 pTlb->aEntries[i * 2 + 1].uTag = 0;717 }718 719 pTlb->cTlbGlobalLargePageCurLoads = 0;720 pTlb->GlobalLargePageRange.uLastTag = 0;721 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;722 }723 }724 #endif725 726 727 /**728 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.729 */730 template<bool a_fGlobal>731 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)732 {733 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)734 Log10(("IEMTlbInvalidateAll\n"));735 736 # ifdef IEM_WITH_CODE_TLB737 pVCpu->iem.s.cbInstrBufTotal = 0;738 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);739 if (a_fGlobal)740 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);741 else742 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);743 # endif744 745 # ifdef IEM_WITH_DATA_TLB746 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);747 if (a_fGlobal)748 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);749 else750 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);751 # endif752 #else753 RT_NOREF(pVCpu);754 #endif755 }756 757 758 /**759 * Invalidates non-global the IEM TLB entries.760 *761 * This is called internally as well as by PGM when moving GC mappings.762 *763 * @param pVCpu The cross context virtual CPU structure of the calling764 * thread.765 */766 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)767 {768 iemTlbInvalidateAll<false>(pVCpu);769 }770 771 772 /**773 * Invalidates all the IEM TLB entries.774 *775 * This is called internally as well as by PGM when moving GC mappings.776 *777 * @param pVCpu The cross context virtual CPU structure of the calling778 * thread.779 */780 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)781 {782 iemTlbInvalidateAll<true>(pVCpu);783 }784 785 786 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)787 788 /** @todo graduate this to cdefs.h or asm-mem.h. 
*/789 # ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */790 # undef RT_CACHELINE_SIZE791 # define RT_CACHELINE_SIZE 128792 # endif793 794 # if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))795 # define MY_PREFETCH(a_pvAddr) _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)796 # elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))797 # define MY_PREFETCH(a_pvAddr) __prefetch((a_pvAddr))798 # elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)799 # define MY_PREFETCH(a_pvAddr) __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)800 # else801 # define MY_PREFETCH(a_pvAddr) ((void)0)802 # endif803 # if 0804 # undef MY_PREFETCH805 # define MY_PREFETCH(a_pvAddr) ((void)0)806 # endif807 808 /** @def MY_PREFETCH_64809 * 64 byte prefetch hint, could be more depending on cache line size. */810 /** @def MY_PREFETCH_128811 * 128 byte prefetch hint. */812 /** @def MY_PREFETCH_256813 * 256 byte prefetch hint. */814 # if RT_CACHELINE_SIZE >= 128815 /* 128 byte cache lines */816 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)817 # define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)818 # define MY_PREFETCH_256(a_pvAddr) do { \819 MY_PREFETCH(a_pvAddr); \820 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \821 } while (0)822 # else823 /* 64 byte cache lines */824 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)825 # define MY_PREFETCH_128(a_pvAddr) do { \826 MY_PREFETCH(a_pvAddr); \827 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \828 } while (0)829 # define MY_PREFETCH_256(a_pvAddr) do { \830 MY_PREFETCH(a_pvAddr); \831 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \832 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \833 MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \834 } while (0)835 # endif836 837 template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>838 DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,839 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT840 {841 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);842 AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */843 844 if (a_fGlobal)845 pTlb->cTlbInvlPgLargeGlobal += 1;846 if (a_fNonGlobal)847 pTlb->cTlbInvlPgLargeNonGlobal += 1;848 849 /*850 * Set up the scan.851 *852 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256 TLB will map853 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Globl]854 * values are for the range 0-1MB, or slots 0-256. So, we construct a mask855 * that fold large page offsets 1MB-2MB into the 0-1MB range.856 *857 * For our example with 2MB pages and a 256 entry TLB: 0xfffffffffffffeff858 *859 * MY_PREFETCH: Hope that prefetching 256 bytes at the time is okay for860 * relevant host architectures.861 */862 /** @todo benchmark this code from the guest side. */863 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);864 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP865 uintptr_t idxBitmap = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;866 uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64867 : IEMTLB_ENTRY_COUNT * 2 / 64;868 #else869 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;870 MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);871 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 
512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;872 #endif873 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0874 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)875 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));876 877 /*878 * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.879 * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.880 */881 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);882 if ( !a_fDataTlb883 && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))884 pVCpu->iem.s.cbInstrBufTotal = 0;885 886 /*887 * Combine TAG values with the TLB revisions.888 */889 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;890 if (a_fNonGlobal)891 GCPtrTag |= pTlb->uTlbRevision;892 893 /*894 * Do the scanning.895 */896 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP897 uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX898 : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);899 /* Scan bitmap entries (64 bits at the time): */900 for (;;)901 {902 # if 1903 uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;904 if (bmEntry)905 {906 /* Scan the non-zero 64-bit value in groups of 8 bits: */907 uint64_t bmToClear = 0;908 uintptr_t idxEven = idxBitmap * 64;909 uint32_t idxTag = 0;910 for (;;)911 {912 if (bmEntry & 0xff)913 {914 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \915 if (a_fNonGlobal) \916 { \917 if (bmEntry & a_bmNonGlobal) \918 { \919 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \920 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \921 { \922 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \923 pTlb->aEntries[a_idxEvenIter].GCPhys, \924 a_idxEvenIter, a_fDataTlb); \925 pTlb->aEntries[a_idxEvenIter].uTag = 0; \926 bmToClearSub8 |= a_bmNonGlobal; \927 } \928 } \929 else \930 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\931 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \932 != (GCPtrTag & IEMTLB_REVISION_MASK)); \933 } \934 if (a_fGlobal) \935 { \936 if (bmEntry & a_bmGlobal) \937 { \938 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \939 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \940 { \941 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \942 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \943 a_idxEvenIter + 1, a_fDataTlb); \944 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \945 bmToClearSub8 |= a_bmGlobal; \946 } \947 } \948 else \949 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\950 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \951 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \952 }953 uint64_t bmToClearSub8 = 0;954 ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)955 ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)956 ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)957 ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)958 bmToClear |= bmToClearSub8 << (idxTag * 2);959 # undef ONE_PAIR960 }961 962 /* advance to the next 8 bits. */963 bmEntry >>= 8;964 if (!bmEntry)965 break;966 idxEven += 8;967 idxTag += 4;968 }969 970 /* Clear the large page flags we covered. 
*/971 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;972 }973 # else974 uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;975 if (bmEntry)976 {977 /* Scan the non-zero 64-bit value completely unrolled: */978 uintptr_t const idxEven = idxBitmap * 64;979 uint64_t bmToClear = 0;980 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \981 if (a_fNonGlobal) \982 { \983 if (bmEntry & a_bmNonGlobal) \984 { \985 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \986 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \987 { \988 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \989 pTlb->aEntries[a_idxEvenIter].GCPhys, \990 a_idxEvenIter, a_fDataTlb); \991 pTlb->aEntries[a_idxEvenIter].uTag = 0; \992 bmToClear |= a_bmNonGlobal; \993 } \994 } \995 else \996 Assert( !(pTlb->aEntriqes[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\997 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \998 != (GCPtrTag & IEMTLB_REVISION_MASK)); \999 } \1000 if (a_fGlobal) \1001 { \1002 if (bmEntry & a_bmGlobal) \1003 { \1004 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \1005 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \1006 { \1007 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \1008 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1009 a_idxEvenIter + 1, a_fDataTlb); \1010 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1011 bmToClear |= a_bmGlobal; \1012 } \1013 } \1014 else \1015 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\1016 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \1017 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \1018 } ((void)0)1019 # define FOUR_PAIRS(a_iByte, a_cShift) \1020 ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \1021 ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \1022 ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \1023 ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)1024 if (bmEntry & (uint32_t)UINT16_MAX)1025 {1026 FOUR_PAIRS(0, 0);1027 FOUR_PAIRS(1, 8);1028 }1029 if (bmEntry & ((uint32_t)UINT16_MAX << 16))1030 {1031 FOUR_PAIRS(2, 16);1032 FOUR_PAIRS(3, 24);1033 }1034 if (bmEntry & ((uint64_t)UINT16_MAX << 32))1035 {1036 FOUR_PAIRS(4, 32);1037 FOUR_PAIRS(5, 40);1038 }1039 if (bmEntry & ((uint64_t)UINT16_MAX << 16))1040 {1041 FOUR_PAIRS(6, 48);1042 FOUR_PAIRS(7, 56);1043 }1044 # undef FOUR_PAIRS1045 1046 /* Clear the large page flags we covered. 
*/1047 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;1048 }1049 # endif1050 1051 /* advance */1052 idxBitmap++;1053 if (idxBitmap >= idxBitmapEnd)1054 break;1055 if (a_fNonGlobal)1056 GCPtrTag += 32;1057 if (a_fGlobal)1058 GCPtrTagGlob += 32;1059 }1060 1061 #else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1062 1063 for (; idxEven < idxEvenEnd; idxEven += 8)1064 {1065 # define ONE_ITERATION(a_idxEvenIter) \1066 if (a_fNonGlobal) \1067 { \1068 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \1069 { \1070 if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1071 { \1072 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \1073 a_idxEvenIter, a_fDataTlb); \1074 pTlb->aEntries[a_idxEvenIter].uTag = 0; \1075 } \1076 } \1077 GCPtrTag++; \1078 } \1079 \1080 if (a_fGlobal) \1081 { \1082 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \1083 { \1084 if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1085 { \1086 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1087 a_idxEvenIter + 1, a_fDataTlb); \1088 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1089 } \1090 } \1091 GCPtrTagGlob++; \1092 }1093 if (idxEven < idxEvenEnd - 4)1094 MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);1095 ONE_ITERATION(idxEven)1096 ONE_ITERATION(idxEven + 2)1097 ONE_ITERATION(idxEven + 4)1098 ONE_ITERATION(idxEven + 6)1099 # undef ONE_ITERATION1100 }1101 #endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1102 }1103 1104 template<bool const a_fDataTlb, bool const a_f2MbLargePage>1105 DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,1106 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT1107 {1108 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);1109 1110 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);1111 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag1112 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)1113 {1114 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1115 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1116 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1117 else1118 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1119 }1120 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1121 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1122 {1123 /* Large pages aren't as likely in the non-global TLB half. 
*/1124 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);1125 }1126 else1127 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1128 }1129 1130 template<bool const a_fDataTlb>1131 DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT1132 {1133 pTlb->cTlbInvlPg += 1;1134 1135 /*1136 * Flush the entry pair.1137 */1138 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))1139 {1140 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);1141 pTlb->aEntries[idxEven].uTag = 0;1142 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))1143 pVCpu->iem.s.cbInstrBufTotal = 0;1144 }1145 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))1146 {1147 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);1148 pTlb->aEntries[idxEven + 1].uTag = 0;1149 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))1150 pVCpu->iem.s.cbInstrBufTotal = 0;1151 }1152 1153 /*1154 * If there are (or has been) large pages in the TLB, we must check if the1155 * address being flushed may involve one of those, as then we'd have to1156 * scan for entries relating to the same page and flush those as well.1157 */1158 # if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */1159 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)1160 # else1161 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)1162 # endif1163 {1164 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);1165 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)1166 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1167 else1168 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1169 }1170 }1171 1172 #endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */1173 1174 /**1175 * Invalidates a page in the TLBs.1176 *1177 * @param pVCpu The cross context virtual CPU structure of the calling1178 * thread.1179 * @param GCPtr The address of the page to invalidate1180 * @thread EMT(pVCpu)1181 */1182 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)1183 {1184 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);1185 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1186 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));1187 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);1188 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));1189 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);1190 1191 # ifdef IEM_WITH_CODE_TLB1192 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);1193 # endif1194 # ifdef IEM_WITH_DATA_TLB1195 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);1196 # endif1197 #else1198 NOREF(pVCpu); NOREF(GCPtr);1199 #endif1200 }1201 1202 1203 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1204 /**1205 * Invalid both TLBs slow fashion following a rollover.1206 *1207 * Worker for IEMTlbInvalidateAllPhysical,1208 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,1209 * iemMemMapJmp and others.1210 *1211 * @thread EMT(pVCpu)1212 */1213 static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)1214 {1215 
Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));1216 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1217 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1218 1219 unsigned i;1220 # ifdef IEM_WITH_CODE_TLB1221 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);1222 while (i-- > 0)1223 {1224 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;1225 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1226 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1227 }1228 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;1229 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1230 # endif1231 # ifdef IEM_WITH_DATA_TLB1232 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);1233 while (i-- > 0)1234 {1235 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;1236 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1237 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1238 }1239 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;1240 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1241 # endif1242 1243 }1244 #endif1245 1246 1247 /**1248 * Invalidates the host physical aspects of the IEM TLBs.1249 *1250 * This is called internally as well as by PGM when moving GC mappings.1251 *1252 * @param pVCpu The cross context virtual CPU structure of the calling1253 * thread.1254 * @note Currently not used.1255 */1256 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)1257 {1258 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1259 /* Note! This probably won't end up looking exactly like this, but it give an idea... */1260 Log10(("IEMTlbInvalidateAllPhysical\n"));1261 1262 # ifdef IEM_WITH_CODE_TLB1263 pVCpu->iem.s.cbInstrBufTotal = 0;1264 # endif1265 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;1266 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))1267 {1268 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;1269 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1270 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;1271 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1272 }1273 else1274 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1275 #else1276 NOREF(pVCpu);1277 #endif1278 }1279 1280 1281 /**1282 * Invalidates the host physical aspects of the IEM TLBs.1283 *1284 * This is called internally as well as by PGM when moving GC mappings.1285 *1286 * @param pVM The cross context VM structure.1287 * @param idCpuCaller The ID of the calling EMT if available to the caller,1288 * otherwise NIL_VMCPUID.1289 * @param enmReason The reason we're called.1290 *1291 * @remarks Caller holds the PGM lock.1292 */1293 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)1294 {1295 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1296 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? 
VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);1297 if (pVCpuCaller)1298 VMCPU_ASSERT_EMT(pVCpuCaller);1299 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);1300 1301 VMCC_FOR_EACH_VMCPU(pVM)1302 {1303 # ifdef IEM_WITH_CODE_TLB1304 if (pVCpuCaller == pVCpu)1305 pVCpu->iem.s.cbInstrBufTotal = 0;1306 # endif1307 1308 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);1309 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;1310 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))1311 { /* likely */}1312 else if (pVCpuCaller != pVCpu)1313 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;1314 else1315 {1316 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1317 continue;1318 }1319 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1320 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1321 1322 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1323 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1324 }1325 VMCC_FOR_EACH_VMCPU_END(pVM);1326 1327 #else1328 RT_NOREF(pVM, idCpuCaller, enmReason);1329 #endif1330 }1331 1332 1333 /**1334 * Flushes the prefetch buffer, light version.1335 */1336 void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)1337 {1338 #ifndef IEM_WITH_CODE_TLB1339 pVCpu->iem.s.cbOpcode = cbInstr;1340 #else1341 RT_NOREF(pVCpu, cbInstr);1342 #endif1343 }1344 1345 1346 /**1347 * Flushes the prefetch buffer, heavy version.1348 */1349 void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)1350 {1351 #ifndef IEM_WITH_CODE_TLB1352 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */1353 #elif 11354 pVCpu->iem.s.cbInstrBufTotal = 0;1355 RT_NOREF(cbInstr);1356 #else1357 RT_NOREF(pVCpu, cbInstr);1358 #endif1359 }1360 1361 1362 1363 #ifdef IEM_WITH_CODE_TLB1364 1365 /**1366 * Tries to fetches @a cbDst opcode bytes, raise the appropriate exception on1367 * failure and jumps.1368 *1369 * We end up here for a number of reasons:1370 * - pbInstrBuf isn't yet initialized.1371 * - Advancing beyond the buffer boundrary (e.g. cross page).1372 * - Advancing beyond the CS segment limit.1373 * - Fetching from non-mappable page (e.g. MMIO).1374 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).1375 *1376 * @param pVCpu The cross context virtual CPU structure of the1377 * calling thread.1378 * @param pvDst Where to return the bytes.1379 * @param cbDst Number of bytes to read. A value of zero is1380 * allowed for initializing pbInstrBuf (the1381 * recompiler does this). In this case it is best1382 * to set pbInstrBuf to NULL prior to the call.1383 */1384 void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP1385 {1386 # ifdef IN_RING31387 for (;;)1388 {1389 Assert(cbDst <= 8);1390 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;1391 1392 /*1393 * We might have a partial buffer match, deal with that first to make the1394 * rest simpler. 
This is the first part of the cross page/buffer case.1395 */1396 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;1397 if (pbInstrBuf != NULL)1398 {1399 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */1400 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;1401 if (offBuf < cbInstrBuf)1402 {1403 Assert(offBuf + cbDst > cbInstrBuf);1404 uint32_t const cbCopy = cbInstrBuf - offBuf;1405 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);1406 1407 cbDst -= cbCopy;1408 pvDst = (uint8_t *)pvDst + cbCopy;1409 offBuf += cbCopy;1410 }1411 }1412 1413 /*1414 * Check segment limit, figuring how much we're allowed to access at this point.1415 *1416 * We will fault immediately if RIP is past the segment limit / in non-canonical1417 * territory. If we do continue, there are one or more bytes to read before we1418 * end up in trouble and we need to do that first before faulting.1419 */1420 RTGCPTR GCPtrFirst;1421 uint32_t cbMaxRead;1422 if (IEM_IS_64BIT_CODE(pVCpu))1423 {1424 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1425 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))1426 { /* likely */ }1427 else1428 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1429 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1430 }1431 else1432 {1433 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1434 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1435 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))1436 { /* likely */ }1437 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */1438 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1439 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;1440 if (cbMaxRead != 0)1441 { /* likely */ }1442 else1443 {1444 /* Overflowed because address is 0 and limit is max. */1445 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1446 cbMaxRead = X86_PAGE_SIZE;1447 }1448 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;1449 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1450 if (cbMaxRead2 < cbMaxRead)1451 cbMaxRead = cbMaxRead2;1452 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */1453 }1454 1455 /*1456 * Get the TLB entry for this piece of code.1457 */1458 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);1459 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);1460 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)1461 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))1462 {1463 /* likely when executing lots of code, otherwise unlikely */1464 # ifdef IEM_WITH_TLB_STATISTICS1465 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;1466 # endif1467 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1468 1469 /* Check TLB page table level access flags. 
*/1470 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))1471 {1472 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)1473 {1474 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));1475 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1476 }1477 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))1478 {1479 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));1480 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1481 }1482 }1483 1484 /* Look up the physical page info if necessary. */1485 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)1486 { /* not necessary */ }1487 else1488 {1489 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))1490 { /* likely */ }1491 else1492 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1493 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;1494 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,1495 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);1496 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));1497 }1498 }1499 else1500 {1501 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;1502 1503 /* This page table walking will set A bits as required by the access while performing the walk.1504 ASSUMES these are set when the address is translated rather than on commit... */1505 /** @todo testcase: check when A bits are actually set by the CPU for code. */1506 PGMPTWALKFAST WalkFast;1507 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,1508 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,1509 &WalkFast);1510 if (RT_SUCCESS(rc))1511 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);1512 else1513 {1514 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT1515 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */1516 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));1517 # endif1518 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));1519 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);1520 }1521 1522 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);1523 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)1524 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. 
*/1525 {1526 pTlbe--;1527 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;1528 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1529 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1530 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1531 else1532 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));1533 # endif1534 }1535 else1536 {1537 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;1538 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;1539 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1540 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1541 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1542 else1543 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);1544 # endif1545 }1546 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))1547 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/1548 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);1549 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;1550 pTlbe->GCPhys = GCPhysPg;1551 pTlbe->pbMappingR3 = NULL;1552 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1553 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);1554 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1555 1556 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))1557 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1558 else1559 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1560 1561 /* Resolve the physical address. */1562 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))1563 { /* likely */ }1564 else1565 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1566 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));1567 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,1568 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);1569 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));1570 }1571 1572 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */1573 /*1574 * Try do a direct read using the pbMappingR3 pointer.1575 * Note! Do not recheck the physical TLB revision number here as we have the1576 * wrong response to changes in the else case. 
If someone is updating1577 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine1578 * pretending we always won the race.1579 */1580 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))1581 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)1582 {1583 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1584 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;1585 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)1586 {1587 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);1588 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;1589 }1590 else1591 {1592 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1593 if (cbInstr + (uint32_t)cbDst <= 15)1594 {1595 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;1596 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1597 }1598 else1599 {1600 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1601 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1602 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1603 }1604 }1605 if (cbDst <= cbMaxRead)1606 {1607 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */1608 # if 0 /* unused */1609 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1610 # endif1611 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;1612 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1613 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1614 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;1615 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */1616 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);1617 else1618 Assert(!pvDst);1619 return;1620 }1621 pVCpu->iem.s.pbInstrBuf = NULL;1622 1623 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);1624 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;1625 }1626 # else1627 # error "refactor as needed"1628 /*1629 * If there is no special read handling, so we can read a bit more and1630 * put it in the prefetch buffer.1631 */1632 if ( cbDst < cbMaxRead1633 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)1634 {1635 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,1636 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);1637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1638 { /* likely */ }1639 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1640 {1641 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1642 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1643 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1644 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));1645 }1646 else1647 {1648 Log((RT_SUCCESS(rcStrict)1649 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1650 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1651 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1652 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1653 }1654 }1655 # endif1656 /*1657 * Special read handling, so only read exactly what's needed.1658 * This is a highly unlikely scenario.1659 */1660 else1661 {1662 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;1663 1664 /* Check instruction length. 
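The #GP(0) paths in this stretch enforce the architectural 15-byte maximum instruction length: the fetcher knows where the current instruction started in the buffer and refuses to extend it beyond 15 bytes. A small sketch of that bookkeeping with hypothetical names, using a C++ exception as a stand-in for raising #GP(0):

#include <cstdint>
#include <stdexcept>

// Offsets are measured inside a linear prefetch buffer.
struct FetchState {
    uint32_t offCurInstrStart; // where the instruction being decoded begins
    uint32_t offNextByte;      // next byte the decoder will consume
};

// Check that fetching cbWanted more bytes keeps the instruction within the
// architectural 15-byte limit; real hardware raises #GP(0) otherwise.
void checkInstructionLength(const FetchState &s, uint32_t cbWanted)
{
    uint32_t const cbInstrSoFar = s.offNextByte - s.offCurInstrStart;
    if (cbInstrSoFar + cbWanted > 15)
        throw std::runtime_error("#GP(0): instruction longer than 15 bytes");
}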
*/1665 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1666 if (RT_LIKELY(cbInstr + cbDst <= 15))1667 { /* likely */ }1668 else1669 {1670 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",1671 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1672 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1673 }1674 1675 /* Do the reading. */1676 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);1677 if (cbToRead > 0)1678 {1679 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),1680 pvDst, cbToRead, PGMACCESSORIGIN_IEM);1681 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1682 { /* likely */ }1683 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1684 {1685 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1686 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1687 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1688 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));1689 }1690 else1691 {1692 Log((RT_SUCCESS(rcStrict)1693 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1694 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1695 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1696 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1697 }1698 }1699 1700 /* Update the state and probably return. */1701 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1702 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;1703 # if 0 /* unused */1704 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1705 # endif1706 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1707 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;1708 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;1709 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? 
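Two computations in this fetch path are worth isolating: the canonical-address test applied to 64-bit code and the clamping of a read to what is left on the current guest page. A standalone sketch assuming a 4 KiB page size (the helper names are made up):

#include <cstdint>
#include <algorithm>

constexpr uint64_t kPageSize       = 4096;
constexpr uint64_t kPageOffsetMask = kPageSize - 1;

// 64-bit canonical check: bits 63..47 must all equal bit 47.  Adding 2^47
// folds both halves of the canonical range into the low 48 bits.
bool isCanonical(uint64_t GCPtr)
{
    return ((GCPtr + UINT64_C(0x0000800000000000)) >> 48) == 0;
}

// Clamp a requested read so it never crosses the current page boundary.
uint32_t clampToPage(uint64_t GCPtr, uint32_t cbWanted)
{
    uint32_t const cbLeftOnPage = (uint32_t)(kPageSize - (GCPtr & kPageOffsetMask));
    return std::min(cbWanted, cbLeftOnPage);
}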
*/1710 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1711 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1712 pVCpu->iem.s.pbInstrBuf = NULL;1713 if (cbToRead == cbDst)1714 return;1715 Assert(cbToRead == cbMaxRead);1716 }1717 1718 /*1719 * More to read, loop.1720 */1721 cbDst -= cbMaxRead;1722 pvDst = (uint8_t *)pvDst + cbMaxRead;1723 }1724 # else /* !IN_RING3 */1725 RT_NOREF(pvDst, cbDst);1726 if (pvDst || cbDst)1727 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);1728 # endif /* !IN_RING3 */1729 }1730 1731 #else /* !IEM_WITH_CODE_TLB */1732 1733 /**1734 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate1735 * exception if it fails.1736 *1737 * @returns Strict VBox status code.1738 * @param pVCpu The cross context virtual CPU structure of the1739 * calling thread.1740 * @param cbMin The minimum number of bytes relative offOpcode1741 * that must be read.1742 */1743 VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT1744 {1745 /*1746 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.1747 *1748 * First translate CS:rIP to a physical address.1749 */1750 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;1751 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;1752 uint8_t const cbLeft = cbOpcode - offOpcode;1753 Assert(cbLeft < cbMin);1754 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));1755 1756 uint32_t cbToTryRead;1757 RTGCPTR GCPtrNext;1758 if (IEM_IS_64BIT_CODE(pVCpu))1759 {1760 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;1761 if (!IEM_IS_CANONICAL(GCPtrNext))1762 return iemRaiseGeneralProtectionFault0(pVCpu);1763 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1764 }1765 else1766 {1767 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;1768 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1769 GCPtrNext32 += cbOpcode;1770 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)1771 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */1772 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1773 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;1774 if (!cbToTryRead) /* overflowed */1775 {1776 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1777 cbToTryRead = UINT32_MAX;1778 /** @todo check out wrapping around the code segment. */1779 }1780 if (cbToTryRead < cbMin - cbLeft)1781 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1782 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;1783 1784 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1785 if (cbToTryRead > cbLeftOnPage)1786 cbToTryRead = cbLeftOnPage;1787 }1788 1789 /* Restrict to opcode buffer space.1790 1791 We're making ASSUMPTIONS here based on work done previously in1792 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will1793 be fetched in case of an instruction crossing two pages. 
*/1794 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)1795 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;1796 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))1797 { /* likely */ }1798 else1799 {1800 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1801 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));1802 return iemRaiseGeneralProtectionFault0(pVCpu);1803 }1804 1805 PGMPTWALKFAST WalkFast;1806 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,1807 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,1808 &WalkFast);1809 if (RT_SUCCESS(rc))1810 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);1811 else1812 {1813 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));1814 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT1815 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)1816 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);1817 #endif1818 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);1819 }1820 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);1821 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1822 1823 RTGCPHYS const GCPhys = WalkFast.GCPhys;1824 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));1825 1826 /*1827 * Read the bytes at this address.1828 *1829 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,1830 * and since PATM should only patch the start of an instruction there1831 * should be no need to check again here.1832 */1833 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))1834 {1835 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],1836 cbToTryRead, PGMACCESSORIGIN_IEM);1837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1838 { /* likely */ }1839 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1840 {1841 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1842 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1843 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1844 }1845 else1846 {1847 Log((RT_SUCCESS(rcStrict)1848 ? 
"iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1849 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1850 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1851 return rcStrict;1852 }1853 }1854 else1855 {1856 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);1857 if (RT_SUCCESS(rc))1858 { /* likely */ }1859 else1860 {1861 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));1862 return rc;1863 }1864 }1865 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;1866 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));1867 1868 return VINF_SUCCESS;1869 }1870 1871 #endif /* !IEM_WITH_CODE_TLB */1872 #ifndef IEM_WITH_SETJMP1873 1874 /**1875 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.1876 *1877 * @returns Strict VBox status code.1878 * @param pVCpu The cross context virtual CPU structure of the1879 * calling thread.1880 * @param pb Where to return the opcode byte.1881 */1882 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT1883 {1884 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1885 if (rcStrict == VINF_SUCCESS)1886 {1887 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1888 *pb = pVCpu->iem.s.abOpcode[offOpcode];1889 pVCpu->iem.s.offOpcode = offOpcode + 1;1890 }1891 else1892 *pb = 0;1893 return rcStrict;1894 }1895 1896 #else /* IEM_WITH_SETJMP */1897 1898 /**1899 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.1900 *1901 * @returns The opcode byte.1902 * @param pVCpu The cross context virtual CPU structure of the calling thread.1903 */1904 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP1905 {1906 # ifdef IEM_WITH_CODE_TLB1907 uint8_t u8;1908 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);1909 return u8;1910 # else1911 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1912 if (rcStrict == VINF_SUCCESS)1913 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];1914 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1915 # endif1916 }1917 1918 #endif /* IEM_WITH_SETJMP */1919 1920 #ifndef IEM_WITH_SETJMP1921 1922 /**1923 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.1924 *1925 * @returns Strict VBox status code.1926 * @param pVCpu The cross context virtual CPU structure of the calling thread.1927 * @param pu16 Where to return the opcode dword.1928 */1929 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1930 {1931 uint8_t u8;1932 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1933 if (rcStrict == VINF_SUCCESS)1934 *pu16 = (int8_t)u8;1935 return rcStrict;1936 }1937 1938 1939 /**1940 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.1941 *1942 * @returns Strict VBox status code.1943 * @param pVCpu The cross context virtual CPU structure of the calling thread.1944 * @param pu32 Where to return the opcode dword.1945 */1946 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT1947 {1948 uint8_t u8;1949 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1950 if (rcStrict == VINF_SUCCESS)1951 *pu32 = (int8_t)u8;1952 return rcStrict;1953 }1954 1955 1956 /**1957 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.1958 *1959 * @returns Strict VBox status code.1960 * @param pVCpu The cross context virtual CPU structure of the calling 
thread.1961 * @param pu64 Where to return the opcode qword.1962 */1963 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1964 {1965 uint8_t u8;1966 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1967 if (rcStrict == VINF_SUCCESS)1968 *pu64 = (int8_t)u8;1969 return rcStrict;1970 }1971 1972 #endif /* !IEM_WITH_SETJMP */1973 1974 1975 #ifndef IEM_WITH_SETJMP1976 1977 /**1978 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.1979 *1980 * @returns Strict VBox status code.1981 * @param pVCpu The cross context virtual CPU structure of the calling thread.1982 * @param pu16 Where to return the opcode word.1983 */1984 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1985 {1986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);1987 if (rcStrict == VINF_SUCCESS)1988 {1989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1990 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS1991 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];1992 # else1993 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);1994 # endif1995 pVCpu->iem.s.offOpcode = offOpcode + 2;1996 }1997 else1998 *pu16 = 0;1999 return rcStrict;2000 }2001 2002 #else /* IEM_WITH_SETJMP */2003 2004 /**2005 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error2006 *2007 * @returns The opcode word.2008 * @param pVCpu The cross context virtual CPU structure of the calling thread.2009 */2010 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2011 {2012 # ifdef IEM_WITH_CODE_TLB2013 uint16_t u16;2014 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);2015 return u16;2016 # else2017 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2018 if (rcStrict == VINF_SUCCESS)2019 {2020 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2021 pVCpu->iem.s.offOpcode += 2;2022 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2023 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2024 # else2025 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2026 # endif2027 }2028 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2029 # endif2030 }2031 2032 #endif /* IEM_WITH_SETJMP */2033 2034 #ifndef IEM_WITH_SETJMP2035 2036 /**2037 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.2038 *2039 * @returns Strict VBox status code.2040 * @param pVCpu The cross context virtual CPU structure of the calling thread.2041 * @param pu32 Where to return the opcode double word.2042 */2043 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2044 {2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2046 if (rcStrict == VINF_SUCCESS)2047 {2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2049 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2050 pVCpu->iem.s.offOpcode = offOpcode + 2;2051 }2052 else2053 *pu32 = 0;2054 return rcStrict;2055 }2056 2057 2058 /**2059 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.2060 *2061 * @returns Strict VBox status code.2062 * @param pVCpu The cross context virtual CPU structure of the calling thread.2063 * @param pu64 Where to return the opcode quad word.2064 */2065 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2066 {2067 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2068 if (rcStrict == VINF_SUCCESS)2069 {2070 uint8_t offOpcode = 
pVCpu->iem.s.offOpcode;2071 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2072 pVCpu->iem.s.offOpcode = offOpcode + 2;2073 }2074 else2075 *pu64 = 0;2076 return rcStrict;2077 }2078 2079 #endif /* !IEM_WITH_SETJMP */2080 2081 #ifndef IEM_WITH_SETJMP2082 2083 /**2084 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.2085 *2086 * @returns Strict VBox status code.2087 * @param pVCpu The cross context virtual CPU structure of the calling thread.2088 * @param pu32 Where to return the opcode dword.2089 */2090 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2091 {2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2093 if (rcStrict == VINF_SUCCESS)2094 {2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2096 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2097 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2098 # else2099 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2100 pVCpu->iem.s.abOpcode[offOpcode + 1],2101 pVCpu->iem.s.abOpcode[offOpcode + 2],2102 pVCpu->iem.s.abOpcode[offOpcode + 3]);2103 # endif2104 pVCpu->iem.s.offOpcode = offOpcode + 4;2105 }2106 else2107 *pu32 = 0;2108 return rcStrict;2109 }2110 2111 #else /* IEM_WITH_SETJMP */2112 2113 /**2114 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.2115 *2116 * @returns The opcode dword.2117 * @param pVCpu The cross context virtual CPU structure of the calling thread.2118 */2119 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2120 {2121 # ifdef IEM_WITH_CODE_TLB2122 uint32_t u32;2123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);2124 return u32;2125 # else2126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2127 if (rcStrict == VINF_SUCCESS)2128 {2129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2130 pVCpu->iem.s.offOpcode = offOpcode + 4;2131 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2132 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2133 # else2134 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2135 pVCpu->iem.s.abOpcode[offOpcode + 1],2136 pVCpu->iem.s.abOpcode[offOpcode + 2],2137 pVCpu->iem.s.abOpcode[offOpcode + 3]);2138 # endif2139 }2140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2141 # endif2142 }2143 2144 #endif /* IEM_WITH_SETJMP */2145 2146 #ifndef IEM_WITH_SETJMP2147 2148 /**2149 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.2150 *2151 * @returns Strict VBox status code.2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.2153 * @param pu64 Where to return the opcode dword.2154 */2155 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2156 {2157 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2158 if (rcStrict == VINF_SUCCESS)2159 {2160 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2161 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2162 pVCpu->iem.s.abOpcode[offOpcode + 1],2163 pVCpu->iem.s.abOpcode[offOpcode + 2],2164 pVCpu->iem.s.abOpcode[offOpcode + 3]);2165 pVCpu->iem.s.offOpcode = offOpcode + 4;2166 }2167 else2168 *pu64 = 0;2169 return rcStrict;2170 }2171 2172 2173 /**2174 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.2175 *2176 * @returns Strict VBox status code.2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.2178 * @param pu64 Where to return the opcode qword.2179 */2180 VBOXSTRICTRC 
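The RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 constructions used here are simply the byte-wise way of reading a little-endian value when unaligned loads cannot be assumed; the IEM_USE_UNALIGNED_DATA_ACCESS branch amounts to a single load of the same bytes. A portable sketch (hypothetical names; memcpy keeps the single-load variant alignment-safe):

#include <cstdint>
#include <cstring>
#include <cassert>

// Assemble little-endian values from an opcode byte stream, byte by byte.
uint16_t readLE16(const uint8_t *pb) { return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8)); }
uint32_t readLE32(const uint8_t *pb)
{
    return (uint32_t)pb[0] | ((uint32_t)pb[1] << 8) | ((uint32_t)pb[2] << 16) | ((uint32_t)pb[3] << 24);
}

// Alignment-safe single-load alternative (what the unaligned-access path boils down to).
uint32_t readLE32Single(const uint8_t *pb) { uint32_t u; std::memcpy(&u, pb, sizeof(u)); return u; }

int main()
{
    const uint8_t ab[] = { 0x78, 0x56, 0x34, 0x12 };
    assert(readLE16(ab) == 0x5678);
    assert(readLE32(ab) == 0x12345678);
    assert(readLE32Single(ab) == 0x12345678);   // assumes a little-endian host
    return 0;
}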
iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2181 {2182 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2183 if (rcStrict == VINF_SUCCESS)2184 {2185 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2186 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2187 pVCpu->iem.s.abOpcode[offOpcode + 1],2188 pVCpu->iem.s.abOpcode[offOpcode + 2],2189 pVCpu->iem.s.abOpcode[offOpcode + 3]);2190 pVCpu->iem.s.offOpcode = offOpcode + 4;2191 }2192 else2193 *pu64 = 0;2194 return rcStrict;2195 }2196 2197 #endif /* !IEM_WITH_SETJMP */2198 2199 #ifndef IEM_WITH_SETJMP2200 2201 /**2202 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.2203 *2204 * @returns Strict VBox status code.2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.2206 * @param pu64 Where to return the opcode qword.2207 */2208 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2209 {2210 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2211 if (rcStrict == VINF_SUCCESS)2212 {2213 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2214 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2215 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2216 # else2217 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2218 pVCpu->iem.s.abOpcode[offOpcode + 1],2219 pVCpu->iem.s.abOpcode[offOpcode + 2],2220 pVCpu->iem.s.abOpcode[offOpcode + 3],2221 pVCpu->iem.s.abOpcode[offOpcode + 4],2222 pVCpu->iem.s.abOpcode[offOpcode + 5],2223 pVCpu->iem.s.abOpcode[offOpcode + 6],2224 pVCpu->iem.s.abOpcode[offOpcode + 7]);2225 # endif2226 pVCpu->iem.s.offOpcode = offOpcode + 8;2227 }2228 else2229 *pu64 = 0;2230 return rcStrict;2231 }2232 2233 #else /* IEM_WITH_SETJMP */2234 2235 /**2236 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.2237 *2238 * @returns The opcode qword.2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.2240 */2241 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2242 {2243 # ifdef IEM_WITH_CODE_TLB2244 uint64_t u64;2245 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);2246 return u64;2247 # else2248 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2249 if (rcStrict == VINF_SUCCESS)2250 {2251 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2252 pVCpu->iem.s.offOpcode = offOpcode + 8;2253 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2254 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2255 # else2256 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2257 pVCpu->iem.s.abOpcode[offOpcode + 1],2258 pVCpu->iem.s.abOpcode[offOpcode + 2],2259 pVCpu->iem.s.abOpcode[offOpcode + 3],2260 pVCpu->iem.s.abOpcode[offOpcode + 4],2261 pVCpu->iem.s.abOpcode[offOpcode + 5],2262 pVCpu->iem.s.abOpcode[offOpcode + 6],2263 pVCpu->iem.s.abOpcode[offOpcode + 7]);2264 # endif2265 }2266 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2267 # endif2268 }2269 2270 #endif /* IEM_WITH_SETJMP */2271 2272 2273 2274 /** @name Misc Worker Functions.2275 * @{2276 */2277 2278 /**2279 * Gets the exception class for the specified exception vector.2280 *2281 * @returns The class of the specified exception.2282 * @param uVector The exception vector.2283 */2284 static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT2285 {2286 Assert(uVector <= X86_XCPT_LAST);2287 switch (uVector)2288 {2289 case X86_XCPT_DE:2290 case X86_XCPT_TS:2291 case X86_XCPT_NP:2292 case X86_XCPT_SS:2293 case X86_XCPT_GP:2294 case X86_XCPT_SX: /* AMD only 
*/2295 return IEMXCPTCLASS_CONTRIBUTORY;2296 2297 case X86_XCPT_PF:2298 case X86_XCPT_VE: /* Intel only */2299 return IEMXCPTCLASS_PAGE_FAULT;2300 2301 case X86_XCPT_DF:2302 return IEMXCPTCLASS_DOUBLE_FAULT;2303 }2304 return IEMXCPTCLASS_BENIGN;2305 }2306 2307 2308 /**2309 * Evaluates how to handle an exception caused during delivery of another event2310 * (exception / interrupt).2311 *2312 * @returns How to handle the recursive exception.2313 * @param pVCpu The cross context virtual CPU structure of the2314 * calling thread.2315 * @param fPrevFlags The flags of the previous event.2316 * @param uPrevVector The vector of the previous event.2317 * @param fCurFlags The flags of the current exception.2318 * @param uCurVector The vector of the current exception.2319 * @param pfXcptRaiseInfo Where to store additional information about the2320 * exception condition. Optional.2321 */2322 VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,2323 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)2324 {2325 /*2326 * Only CPU exceptions can be raised while delivering other events, software interrupt2327 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.2328 */2329 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);2330 Assert(pVCpu); RT_NOREF(pVCpu);2331 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));2332 2333 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;2334 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;2335 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)2336 {2337 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);2338 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)2339 {2340 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);2341 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT2342 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT2343 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))2344 {2345 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;2346 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF2347 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;2348 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. 
uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,2349 uCurVector, pVCpu->cpum.GstCtx.cr2));2350 }2351 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY2352 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)2353 {2354 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;2355 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));2356 }2357 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT2358 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY2359 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))2360 {2361 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;2362 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));2363 }2364 }2365 else2366 {2367 if (uPrevVector == X86_XCPT_NMI)2368 {2369 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;2370 if (uCurVector == X86_XCPT_PF)2371 {2372 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;2373 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));2374 }2375 }2376 else if ( uPrevVector == X86_XCPT_AC2377 && uCurVector == X86_XCPT_AC)2378 {2379 enmRaise = IEMXCPTRAISE_CPU_HANG;2380 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;2381 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));2382 }2383 }2384 }2385 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)2386 {2387 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;2388 if (uCurVector == X86_XCPT_PF)2389 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;2390 }2391 else2392 {2393 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);2394 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;2395 }2396 2397 if (pfXcptRaiseInfo)2398 *pfXcptRaiseInfo = fRaiseInfo;2399 return enmRaise;2400 }2401 2402 2403 /**2404 * Enters the CPU shutdown state initiated by a triple fault or other2405 * unrecoverable conditions.2406 *2407 * @returns Strict VBox status code.2408 * @param pVCpu The cross context virtual CPU structure of the2409 * calling thread.2410 */2411 static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT2412 {2413 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))2414 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);2415 2416 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))2417 {2418 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));2419 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);2420 }2421 2422 RT_NOREF(pVCpu);2423 return VINF_EM_TRIPLE_FAULT;2424 }2425 2426 2427 /**2428 * Validates a new SS segment.2429 *2430 * @returns VBox strict status code.2431 * @param pVCpu The cross context virtual CPU structure of the2432 * calling thread.2433 * @param NewSS The new SS selctor.2434 * @param uCpl The CPL to load the stack for.2435 * @param pDesc Where to return the descriptor.2436 */2437 static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT2438 {2439 /* Null selectors are not allowed (we're not called for dispatching2440 interrupts with SS=0 in long mode). */2441 if (!(NewSS & X86_SEL_MASK_OFF_RPL))2442 {2443 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));2444 return iemRaiseTaskSwitchFault0(pVCpu);2445 }2446 2447 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. 
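The decision logic above is the classic double-fault matrix: a page fault or contributory exception raised while delivering a page fault, or a contributory exception raised while delivering a contributory one, escalates to #DF, and a further contributory exception or page fault while delivering #DF means a triple fault. A compact sketch of that decision with simplified enums instead of the IEM types; the classification below covers only the common vectors (the real switch also handles the AMD/Intel specific #SX and #VE):

#include <cstdint>

enum class XcptClass { Benign, Contributory, PageFault, DoubleFault };
enum class Outcome   { DeliverCurrent, DoubleFault, TripleFault };

// Simplified vector classification: #DE, #TS, #NP, #SS, #GP are contributory.
XcptClass classify(uint8_t uVector)
{
    switch (uVector)
    {
        case 0: case 10: case 11: case 12: case 13: return XcptClass::Contributory;
        case 14:                                    return XcptClass::PageFault;    // #PF
        case 8:                                     return XcptClass::DoubleFault;  // #DF
        default:                                    return XcptClass::Benign;
    }
}

// Decide what a second exception raised during delivery of a first escalates to.
Outcome evaluate(uint8_t uPrevVector, uint8_t uCurVector)
{
    XcptClass const prev = classify(uPrevVector);
    XcptClass const cur  = classify(uCurVector);
    if (prev == XcptClass::PageFault && (cur == XcptClass::PageFault || cur == XcptClass::Contributory))
        return Outcome::DoubleFault;
    if (prev == XcptClass::Contributory && cur == XcptClass::Contributory)
        return Outcome::DoubleFault;
    if (prev == XcptClass::DoubleFault && (cur == XcptClass::Contributory || cur == XcptClass::PageFault))
        return Outcome::TripleFault;
    return Outcome::DeliverCurrent;
}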
*/2448 if ((NewSS & X86_SEL_RPL) != uCpl)2449 {2450 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));2451 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2452 }2453 2454 /*2455 * Read the descriptor.2456 */2457 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);2458 if (rcStrict != VINF_SUCCESS)2459 return rcStrict;2460 2461 /*2462 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.2463 */2464 if (!pDesc->Legacy.Gen.u1DescType)2465 {2466 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));2467 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2468 }2469 2470 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)2471 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )2472 {2473 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));2474 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2475 }2476 if (pDesc->Legacy.Gen.u2Dpl != uCpl)2477 {2478 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));2479 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2480 }2481 2482 /* Is it there? */2483 /** @todo testcase: Is this checked before the canonical / limit check below? */2484 if (!pDesc->Legacy.Gen.u1Present)2485 {2486 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));2487 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);2488 }2489 2490 return VINF_SUCCESS;2491 }2492 2493 /** @} */2494 2495 2496 /** @name Raising Exceptions.2497 *2498 * @{2499 */2500 2501 2502 /**2503 * Loads the specified stack far pointer from the TSS.2504 *2505 * @returns VBox strict status code.2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.2507 * @param uCpl The CPL to load the stack for.2508 * @param pSelSS Where to return the new stack segment.2509 * @param puEsp Where to return the new stack pointer.2510 */2511 static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT2512 {2513 VBOXSTRICTRC rcStrict;2514 Assert(uCpl < 4);2515 2516 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);2517 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)2518 {2519 /*2520 * 16-bit TSS (X86TSS16).2521 */2522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();2523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:2524 {2525 uint32_t off = uCpl * 4 + 2;2526 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)2527 {2528 /** @todo check actual access pattern here. */2529 uint32_t u32Tmp = 0; /* gcc maybe... */2530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2531 if (rcStrict == VINF_SUCCESS)2532 {2533 *puEsp = RT_LOWORD(u32Tmp);2534 *pSelSS = RT_HIWORD(u32Tmp);2535 return VINF_SUCCESS;2536 }2537 }2538 else2539 {2540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));2541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2542 }2543 break;2544 }2545 2546 /*2547 * 32-bit TSS (X86TSS32).2548 */2549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();2550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:2551 {2552 uint32_t off = uCpl * 8 + 4;2553 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)2554 {2555 /** @todo check actual access pattern here. 
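The SS checks in this helper follow the rules documented for loading SS: non-null selector, RPL equal to the target CPL, a writable data segment, DPL equal to the CPL, and present (with #TS for the first four and #NP for the last). A condensed sketch of the same rule set over a simplified descriptor view; MiniDesc and the enum are hypothetical:

#include <cstdint>

// Minimal view of a segment descriptor for the purpose of this check.
struct MiniDesc {
    bool    fCodeOrData;   // S bit: 1 = code/data, 0 = system
    bool    fCode;         // type bit 3: 1 = code segment
    bool    fWritable;     // type bit 1 (for data segments)
    bool    fPresent;      // P bit
    uint8_t uDpl;          // descriptor privilege level
};

enum class SsFault { None, TaskSwitchFault, NotPresentFault };

SsFault validateNewSs(uint16_t uSel, uint8_t uCpl, const MiniDesc &desc)
{
    if ((uSel & 0xfff8) == 0)           return SsFault::TaskSwitchFault; // null selector
    if ((uSel & 0x3) != uCpl)           return SsFault::TaskSwitchFault; // RPL != CPL
    if (!desc.fCodeOrData)              return SsFault::TaskSwitchFault; // system descriptor
    if (desc.fCode || !desc.fWritable)  return SsFault::TaskSwitchFault; // not a writable data segment
    if (desc.uDpl != uCpl)              return SsFault::TaskSwitchFault; // DPL != CPL
    if (!desc.fPresent)                 return SsFault::NotPresentFault; // #NP rather than #TS
    return SsFault::None;
}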
*/2556 uint64_t u64Tmp;2557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2558 if (rcStrict == VINF_SUCCESS)2559 {2560 *puEsp = u64Tmp & UINT32_MAX;2561 *pSelSS = (RTSEL)(u64Tmp >> 32);2562 return VINF_SUCCESS;2563 }2564 }2565 else2566 {2567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));2568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2569 }2570 break;2571 }2572 2573 default:2574 AssertFailed();2575 rcStrict = VERR_IEM_IPE_4;2576 break;2577 }2578 2579 *puEsp = 0; /* make gcc happy */2580 *pSelSS = 0; /* make gcc happy */2581 return rcStrict;2582 }2583 2584 2585 /**2586 * Loads the specified stack pointer from the 64-bit TSS.2587 *2588 * @returns VBox strict status code.2589 * @param pVCpu The cross context virtual CPU structure of the calling thread.2590 * @param uCpl The CPL to load the stack for.2591 * @param uIst The interrupt stack table index, 0 if to use uCpl.2592 * @param puRsp Where to return the new stack pointer.2593 */2594 static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT2595 {2596 Assert(uCpl < 4);2597 Assert(uIst < 8);2598 *puRsp = 0; /* make gcc happy */2599 2600 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);2601 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);2602 2603 uint32_t off;2604 if (uIst)2605 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);2606 else2607 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);2608 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)2609 {2610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));2611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2612 }2613 2614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2615 }2616 2617 2618 /**2619 * Adjust the CPU state according to the exception being raised.2620 *2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.2622 * @param u8Vector The exception that has been raised.2623 */2624 DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)2625 {2626 switch (u8Vector)2627 {2628 case X86_XCPT_DB:2629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);2630 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;2631 break;2632 /** @todo Read the AMD and Intel exception reference... 
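The offsets used by the two stack loaders come straight from the TSS layouts: a 16-bit TSS stores SP:SS pairs of 4 bytes starting at offset 2, a 32-bit TSS stores ESP:SS pairs of 8 bytes starting at offset 4, and the 64-bit TSS keeps RSP0..RSP2 at offset 4 and IST1..IST7 at offset 0x24. A sketch of just the offset arithmetic (hypothetical helpers; the caller still has to check the resulting slot against the TR limit, as the code above does):

#include <cstddef>

// Byte offset of the stack-pointer slot to read for an inter-privilege
// transition, per TSS flavour.  IST handling exists for the 64-bit TSS only.
size_t tss16StackSlot(unsigned uCpl) { return 2 + uCpl * 4; }   // SP:SS, 4 bytes
size_t tss32StackSlot(unsigned uCpl) { return 4 + uCpl * 8; }   // ESP:SS, 8 bytes
size_t tss64StackSlot(unsigned uCpl, unsigned uIst)
{
    return uIst ? 0x24 + (uIst - 1) * 8   /* IST1..IST7 */
                : 0x04 + uCpl * 8;        /* RSP0..RSP2  */
}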
*/2633 }2634 }2635 2636 2637 /**2638 * Implements exceptions and interrupts for real mode.2639 *2640 * @returns VBox strict status code.2641 * @param pVCpu The cross context virtual CPU structure of the calling thread.2642 * @param cbInstr The number of bytes to offset rIP by in the return2643 * address.2644 * @param u8Vector The interrupt / exception vector number.2645 * @param fFlags The flags.2646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2648 */2649 static VBOXSTRICTRC2650 iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,2651 uint8_t cbInstr,2652 uint8_t u8Vector,2653 uint32_t fFlags,2654 uint16_t uErr,2655 uint64_t uCr2) RT_NOEXCEPT2656 {2657 NOREF(uErr); NOREF(uCr2);2658 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2659 2660 /*2661 * Read the IDT entry.2662 */2663 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)2664 {2665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));2666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));2667 }2668 RTFAR16 Idte;2669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);2670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2671 {2672 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));2673 return rcStrict;2674 }2675 2676 #ifdef LOG_ENABLED2677 /* If software interrupt, try decode it if logging is enabled and such. */2678 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2679 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))2680 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);2681 #endif2682 2683 /*2684 * Push the stack frame.2685 */2686 uint8_t bUnmapInfo;2687 uint16_t *pu16Frame;2688 uint64_t uNewRsp;2689 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);2690 if (rcStrict != VINF_SUCCESS)2691 return rcStrict;2692 2693 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);2694 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC2695 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);2696 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)2697 fEfl |= UINT16_C(0xf000);2698 #endif2699 pu16Frame[2] = (uint16_t)fEfl;2700 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;2701 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;2702 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);2703 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2704 return rcStrict;2705 2706 /*2707 * Load the vector address into cs:ip and make exception specific state2708 * adjustments.2709 */2710 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;2711 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;2712 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;2713 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;2714 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */2715 pVCpu->cpum.GstCtx.rip = Idte.off;2716 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);2717 IEMMISC_SET_EFL(pVCpu, fEfl);2718 2719 /** @todo do we actually do this in real mode? 
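Real-mode delivery involves no descriptor tables at all: the handler is the CS:IP pair stored at IDTR.base + vector*4, and the CPU pushes FLAGS, CS and the return IP before loading it and clearing IF/TF/AC. A simplified sketch of that dispatch against a flat byte image of guest memory; the types and helpers are hypothetical, a little-endian host is assumed, and no fault handling is modelled:

#include <cstdint>
#include <cstring>

struct RealModeCpu {
    uint16_t cs, ip, ss, sp, flags;
    uint32_t idtBase;        // normally 0 in real mode
    uint8_t *pbMem;          // flat view of the first MiB of guest memory
};

static uint16_t rd16(const uint8_t *p)          { uint16_t v; std::memcpy(&v, p, 2); return v; }
static void     wr16(uint8_t *p, uint16_t v)    { std::memcpy(p, &v, 2); }

// Deliver interrupt/exception 'vector'; ipReturn is the IP value to push.
void realModeDispatch(RealModeCpu &cpu, uint8_t vector, uint16_t ipReturn)
{
    // IVT entry: offset word followed by segment word at base + vector*4.
    uint8_t *pEntry = cpu.pbMem + cpu.idtBase + vector * 4u;
    uint16_t const newIp = rd16(pEntry);
    uint16_t const newCs = rd16(pEntry + 2);

    // Push FLAGS, CS, return IP (stack grows down, 16-bit pushes).
    uint8_t *pStack = cpu.pbMem + ((uint32_t)cpu.ss << 4);
    cpu.sp -= 2; wr16(pStack + cpu.sp, cpu.flags);
    cpu.sp -= 2; wr16(pStack + cpu.sp, cpu.cs);
    cpu.sp -= 2; wr16(pStack + cpu.sp, ipReturn);

    // Load the vector and clear IF (bit 9) and TF (bit 8); AC lives in bit 18
    // of EFLAGS and so has no counterpart in this 16-bit FLAGS sketch.
    cpu.cs     = newCs;
    cpu.ip     = newIp;
    cpu.flags &= (uint16_t)~((1u << 9) | (1u << 8));
}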
*/2720 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)2721 iemRaiseXcptAdjustState(pVCpu, u8Vector);2722 2723 /*2724 * Deal with debug events that follows the exception and clear inhibit flags.2725 */2726 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2727 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))2728 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2729 else2730 {2731 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",2732 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));2733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);2734 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)2735 >> CPUMCTX_DBG_HIT_DRX_SHIFT;2736 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2737 return iemRaiseDebugException(pVCpu);2738 }2739 2740 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,2741 so best leave them alone in case we're in a weird kind of real mode... */2742 2743 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;2744 }2745 2746 2747 /**2748 * Loads a NULL data selector into when coming from V8086 mode.2749 *2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.2751 * @param pSReg Pointer to the segment register.2752 */2753 DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)2754 {2755 pSReg->Sel = 0;2756 pSReg->ValidSel = 0;2757 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2758 {2759 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */2760 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;2761 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;2762 }2763 else2764 {2765 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2766 /** @todo check this on AMD-V */2767 pSReg->u64Base = 0;2768 pSReg->u32Limit = 0;2769 }2770 }2771 2772 2773 /**2774 * Loads a segment selector during a task switch in V8086 mode.2775 *2776 * @param pSReg Pointer to the segment register.2777 * @param uSel The selector value to load.2778 */2779 DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)2780 {2781 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */2782 pSReg->Sel = uSel;2783 pSReg->ValidSel = uSel;2784 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2785 pSReg->u64Base = uSel << 4;2786 pSReg->u32Limit = 0xffff;2787 pSReg->Attr.u = 0xf3;2788 }2789 2790 2791 /**2792 * Loads a segment selector during a task switch in protected mode.2793 *2794 * In this task switch scenario, we would throw \#TS exceptions rather than2795 * \#GPs.2796 *2797 * @returns VBox strict status code.2798 * @param pVCpu The cross context virtual CPU structure of the calling thread.2799 * @param pSReg Pointer to the segment register.2800 * @param uSel The new selector value.2801 *2802 * @remarks This does _not_ handle CS or SS.2803 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.2804 */2805 static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT2806 {2807 Assert(!IEM_IS_64BIT_CODE(pVCpu));2808 2809 /* Null data selector. 
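Loading a selector in virtual-8086 mode is pure arithmetic, which is all the helper above does: hidden base = selector * 16, limit fixed at 0xFFFF, attributes forced to a present, DPL-3, read/write, accessed data segment (0xF3). A tiny sketch with a made-up register struct:

#include <cstdint>

struct HiddenSegReg {
    uint16_t sel;
    uint64_t base;
    uint32_t limit;
    uint16_t attr;
};

// Virtual-8086 mode segment load: no descriptor table is consulted.
void loadSelectorV86(HiddenSegReg &reg, uint16_t uSel)
{
    reg.sel   = uSel;
    reg.base  = (uint64_t)uSel << 4;  // paragraph addressing, base = sel * 16
    reg.limit = 0xffff;               // always 64 KiB
    reg.attr  = 0xf3;                 // present, DPL=3, data, read/write, accessed
}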
*/2810 if (!(uSel & X86_SEL_MASK_OFF_RPL))2811 {2812 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);2813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2815 return VINF_SUCCESS;2816 }2817 2818 /* Fetch the descriptor. */2819 IEMSELDESC Desc;2820 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);2821 if (rcStrict != VINF_SUCCESS)2822 {2823 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,2824 VBOXSTRICTRC_VAL(rcStrict)));2825 return rcStrict;2826 }2827 2828 /* Must be a data segment or readable code segment. */2829 if ( !Desc.Legacy.Gen.u1DescType2830 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)2831 {2832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,2833 Desc.Legacy.Gen.u4Type));2834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2835 }2836 2837 /* Check privileges for data segments and non-conforming code segments. */2838 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2839 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2840 {2841 /* The RPL and the new CPL must be less than or equal to the DPL. */2842 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl2843 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))2844 {2845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",2846 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));2847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2848 }2849 }2850 2851 /* Is it there? */2852 if (!Desc.Legacy.Gen.u1Present)2853 {2854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));2855 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2856 }2857 2858 /* The base and limit. */2859 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);2860 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);2861 2862 /*2863 * Ok, everything checked out fine. Now set the accessed bit before2864 * committing the result into the registers.2865 */2866 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))2867 {2868 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);2869 if (rcStrict != VINF_SUCCESS)2870 return rcStrict;2871 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;2872 }2873 2874 /* Commit */2875 pSReg->Sel = uSel;2876 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);2877 pSReg->u32Limit = cbLimit;2878 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */2879 pSReg->ValidSel = uSel;2880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2882 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;2883 2884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2885 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2886 return VINF_SUCCESS;2887 }2888 2889 2890 /**2891 * Performs a task switch.2892 *2893 * If the task switch is the result of a JMP, CALL or IRET instruction, the2894 * caller is responsible for performing the necessary checks (like DPL, TSS2895 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction2896 * reference for JMP, CALL, IRET.2897 *2898 * If the task switch is the due to a software interrupt or hardware exception,2899 * the caller is responsible for validating the TSS selector and descriptor. 
See2900 * Intel Instruction reference for INT n.2901 *2902 * @returns VBox strict status code.2903 * @param pVCpu The cross context virtual CPU structure of the calling thread.2904 * @param enmTaskSwitch The cause of the task switch.2905 * @param uNextEip The EIP effective after the task switch.2906 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.2907 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2908 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2909 * @param SelTss The TSS selector of the new task.2910 * @param pNewDescTss Pointer to the new TSS descriptor.2911 */2912 VBOXSTRICTRC2913 iemTaskSwitch(PVMCPUCC pVCpu,2914 IEMTASKSWITCH enmTaskSwitch,2915 uint32_t uNextEip,2916 uint32_t fFlags,2917 uint16_t uErr,2918 uint64_t uCr2,2919 RTSEL SelTss,2920 PIEMSELDESC pNewDescTss) RT_NOEXCEPT2921 {2922 Assert(!IEM_IS_REAL_MODE(pVCpu));2923 Assert(!IEM_IS_64BIT_CODE(pVCpu));2924 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2925 2926 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;2927 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL2928 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY2929 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2930 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2931 2932 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2933 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2934 2935 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,2936 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));2937 2938 /* Update CR2 in case it's a page-fault. */2939 /** @todo This should probably be done much earlier in IEM/PGM. See2940 * @bugref{5653#c49}. */2941 if (fFlags & IEM_XCPT_FLAGS_CR2)2942 pVCpu->cpum.GstCtx.cr2 = uCr2;2943 2944 /*2945 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"2946 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".2947 */2948 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);2949 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;2950 if (uNewTssLimit < uNewTssLimitMin)2951 {2952 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",2953 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);2955 }2956 2957 /*2958 * Task switches in VMX non-root mode always cause task switches.2959 * The new TSS must have been read and validated (DPL, limits etc.) before a2960 * task-switch VM-exit commences.2961 *2962 * See Intel spec. 25.4.2 "Treatment of Task Switches".2963 */2964 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))2965 {2966 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));2967 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);2968 }2969 2970 /*2971 * The SVM nested-guest intercept for task-switch takes priority over all exceptions2972 * after validating the incoming (new) TSS, see AMD spec. 
15.14.1 "Task Switch Intercept".2973 */2974 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))2975 {2976 uint64_t const uExitInfo1 = SelTss;2977 uint64_t uExitInfo2 = uErr;2978 switch (enmTaskSwitch)2979 {2980 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;2981 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;2982 default: break;2983 }2984 if (fFlags & IEM_XCPT_FLAGS_ERR)2985 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;2986 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)2987 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;2988 2989 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));2990 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);2991 RT_NOREF2(uExitInfo1, uExitInfo2);2992 }2993 2994 /*2995 * Check the current TSS limit. The last written byte to the current TSS during the2996 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).2997 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.2998 *2999 * The AMD docs doesn't mention anything about limit checks with LTR which suggests you can3000 * end up with smaller than "legal" TSS limits.3001 */3002 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;3003 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;3004 if (uCurTssLimit < uCurTssLimitMin)3005 {3006 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",3007 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));3008 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);3009 }3010 3011 /*3012 * Verify that the new TSS can be accessed and map it. Map only the required contents3013 * and not the entire TSS.3014 */3015 uint8_t bUnmapInfoNewTss;3016 void *pvNewTss;3017 uint32_t const cbNewTss = uNewTssLimitMin + 1;3018 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);3019 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);3020 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may3021 * not perform correct translation if this happens. See Intel spec. 7.2.13022 * "Task-State Segment". */3023 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);3024 /** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.3025 * Consider wrapping the remainder into a function for simpler cleanup. */3026 if (rcStrict != VINF_SUCCESS)3027 {3028 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,3029 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));3030 return rcStrict;3031 }3032 3033 /*3034 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.3035 */3036 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;3037 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP3038 || enmTaskSwitch == IEMTASKSWITCH_IRET)3039 {3040 uint8_t bUnmapInfoDescCurTss;3041 PX86DESC pDescCurTss;3042 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,3043 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3044 if (rcStrict != VINF_SUCCESS)3045 {3046 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. 
enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3047 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3048 return rcStrict;3049 }3050 3051 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3052 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);3053 if (rcStrict != VINF_SUCCESS)3054 {3055 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3056 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3057 return rcStrict;3058 }3059 3060 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */3061 if (enmTaskSwitch == IEMTASKSWITCH_IRET)3062 {3063 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY3064 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);3065 fEFlags &= ~X86_EFL_NT;3066 }3067 }3068 3069 /*3070 * Save the CPU state into the current TSS.3071 */3072 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;3073 if (GCPtrNewTss == GCPtrCurTss)3074 {3075 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));3076 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",3077 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,3078 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,3079 pVCpu->cpum.GstCtx.ldtr.Sel));3080 }3081 if (fIsNewTss386)3082 {3083 /*3084 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.3085 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.3086 */3087 uint8_t bUnmapInfoCurTss32;3088 void *pvCurTss32;3089 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);3090 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);3091 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);3092 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,3093 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3094 if (rcStrict != VINF_SUCCESS)3095 {3096 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3097 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3098 return rcStrict;3099 }3100 3101 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3102 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);3103 pCurTss32->eip = uNextEip;3104 pCurTss32->eflags = fEFlags;3105 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;3106 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;3107 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;3108 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;3109 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;3110 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;3111 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;3112 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;3113 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;3114 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;3115 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;3116 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;3117 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;3118 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;3119 3120 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);3121 if (rcStrict != VINF_SUCCESS)3122 {3123 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. 
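The pointer arithmetic used for the current-TSS write-back is a deliberate trick: only the dynamic field window (eip through the segment registers) is mapped, and the returned pointer is rebased backwards by the window's start offset so the ordinary structure member names still address the right bytes; anything outside the window must never be dereferenced. A sketch of the same idea with a made-up layout; it mirrors the uintptr_t rebasing above, which is pragmatic rather than strictly conforming C++:

#include <cstdint>
#include <cstddef>
#include <cstring>

struct DemoTss {                 // stand-in for a 32-bit TSS; only the layout matters
    uint32_t link, esp0, ss0, esp1, ss1, esp2, ss2, cr3;
    uint32_t eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;  // dynamic part...
    uint32_t es, cs, ss, ds, fs, gs;                               // ...ends here
    uint32_t selLdt;
};

int main()
{
    // Pretend this buffer is the mapped guest window covering only eip..gs.
    constexpr size_t offWindow = offsetof(DemoTss, eip);
    constexpr size_t cbWindow  = offsetof(DemoTss, selLdt) - offWindow;
    alignas(uint32_t) uint8_t abMapped[cbWindow] = {};

    // Rebase so that member names line up with the mapped bytes.
    DemoTss *pTss = (DemoTss *)((uintptr_t)abMapped - offWindow);
    pTss->eip = 0x1234;          // lands at abMapped[0]
    pTss->gs  = 0x23;            // last field inside the window
    // pTss->cr3 = ...;          // would be outside the mapping -- never touch it!

    uint32_t eip;
    std::memcpy(&eip, abMapped, sizeof(eip));
    return eip == 0x1234 ? 0 : 1;
}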
enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3124 VBOXSTRICTRC_VAL(rcStrict)));3125 return rcStrict;3126 }3127 }3128 else3129 {3130 /*3131 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.3132 */3133 uint8_t bUnmapInfoCurTss16;3134 void *pvCurTss16;3135 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);3136 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);3137 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);3138 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,3139 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3140 if (rcStrict != VINF_SUCCESS)3141 {3142 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3143 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3144 return rcStrict;3145 }3146 3147 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3148 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);3149 pCurTss16->ip = uNextEip;3150 pCurTss16->flags = (uint16_t)fEFlags;3151 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;3152 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;3153 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;3154 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;3155 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;3156 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;3157 pCurTss16->si = pVCpu->cpum.GstCtx.si;3158 pCurTss16->di = pVCpu->cpum.GstCtx.di;3159 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;3160 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;3161 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;3162 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;3163 3164 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);3165 if (rcStrict != VINF_SUCCESS)3166 {3167 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3168 VBOXSTRICTRC_VAL(rcStrict)));3169 return rcStrict;3170 }3171 }3172 3173 /*3174 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.3175 */3176 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3177 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3178 {3179 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */3180 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;3181 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;3182 }3183 3184 /*3185 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,3186 * it's done further below with error handling (e.g. CR3 changes will go through PGM).3187 */3188 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;3189 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;3190 bool fNewDebugTrap;3191 if (fIsNewTss386)3192 {3193 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;3194 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? 
pNewTss32->cr3 : 0;3195 uNewEip = pNewTss32->eip;3196 uNewEflags = pNewTss32->eflags;3197 uNewEax = pNewTss32->eax;3198 uNewEcx = pNewTss32->ecx;3199 uNewEdx = pNewTss32->edx;3200 uNewEbx = pNewTss32->ebx;3201 uNewEsp = pNewTss32->esp;3202 uNewEbp = pNewTss32->ebp;3203 uNewEsi = pNewTss32->esi;3204 uNewEdi = pNewTss32->edi;3205 uNewES = pNewTss32->es;3206 uNewCS = pNewTss32->cs;3207 uNewSS = pNewTss32->ss;3208 uNewDS = pNewTss32->ds;3209 uNewFS = pNewTss32->fs;3210 uNewGS = pNewTss32->gs;3211 uNewLdt = pNewTss32->selLdt;3212 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);3213 }3214 else3215 {3216 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;3217 uNewCr3 = 0;3218 uNewEip = pNewTss16->ip;3219 uNewEflags = pNewTss16->flags;3220 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;3221 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;3222 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;3223 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;3224 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;3225 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;3226 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;3227 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;3228 uNewES = pNewTss16->es;3229 uNewCS = pNewTss16->cs;3230 uNewSS = pNewTss16->ss;3231 uNewDS = pNewTss16->ds;3232 uNewFS = 0;3233 uNewGS = 0;3234 uNewLdt = pNewTss16->selLdt;3235 fNewDebugTrap = false;3236 }3237 3238 if (GCPtrNewTss == GCPtrCurTss)3239 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",3240 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));3241 3242 /*3243 * We're done accessing the new TSS.3244 */3245 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3246 if (rcStrict != VINF_SUCCESS)3247 {3248 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));3249 return rcStrict;3250 }3251 3252 /*3253 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.3254 */3255 if (enmTaskSwitch != IEMTASKSWITCH_IRET)3256 {3257 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,3258 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3259 if (rcStrict != VINF_SUCCESS)3260 {3261 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3262 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3263 return rcStrict;3264 }3265 3266 /* Check that the descriptor indicates the new TSS is available (not busy). */3267 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL3268 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,3269 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));3270 3271 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3272 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3273 if (rcStrict != VINF_SUCCESS)3274 {3275 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3276 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3277 return rcStrict;3278 }3279 }3280 3281 /*3282 * From this point on, we're technically in the new task. 
We will defer exceptions3283 * until the completion of the task switch but before executing any instructions in the new task.3284 */3285 pVCpu->cpum.GstCtx.tr.Sel = SelTss;3286 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;3287 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;3288 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);3289 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);3290 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);3291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);3292 3293 /* Set the busy bit in TR. */3294 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3295 3296 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */3297 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3298 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3299 {3300 uNewEflags |= X86_EFL_NT;3301 }3302 3303 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */3304 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;3305 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);3306 3307 pVCpu->cpum.GstCtx.eip = uNewEip;3308 pVCpu->cpum.GstCtx.eax = uNewEax;3309 pVCpu->cpum.GstCtx.ecx = uNewEcx;3310 pVCpu->cpum.GstCtx.edx = uNewEdx;3311 pVCpu->cpum.GstCtx.ebx = uNewEbx;3312 pVCpu->cpum.GstCtx.esp = uNewEsp;3313 pVCpu->cpum.GstCtx.ebp = uNewEbp;3314 pVCpu->cpum.GstCtx.esi = uNewEsi;3315 pVCpu->cpum.GstCtx.edi = uNewEdi;3316 3317 uNewEflags &= X86_EFL_LIVE_MASK;3318 uNewEflags |= X86_EFL_RA1_MASK;3319 IEMMISC_SET_EFL(pVCpu, uNewEflags);3320 3321 /*3322 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors3323 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR33324 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.3325 */3326 pVCpu->cpum.GstCtx.es.Sel = uNewES;3327 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;3328 3329 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3330 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;3331 3332 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3333 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;3334 3335 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;3336 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;3337 3338 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;3339 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;3340 3341 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;3342 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;3343 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);3344 3345 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;3346 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;3347 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;3348 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);3349 3350 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3351 {3352 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;3353 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;3354 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;3355 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;3356 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;3357 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;3358 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;3359 }3360 3361 /*3362 * Switch CR3 for the new task.3363 */3364 if ( fIsNewTss3863365 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))3366 {3367 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */3368 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);3369 AssertRCSuccessReturn(rc, rc);3370 3371 /* Inform PGM. 
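For orientation only (an editor's aside, not part of this changeset): the PGMFlushTLB call below passes !(CR4 & X86_CR4_PGE) as its "flush globals" argument, i.e. global TLB entries only need to be dropped when global pages are disabled. A minimal self-contained sketch of that decision, with the CR4.PGE bit spelled out locally:

    #include <cstdint>

    /* Sketch: must a CR3 load also flush global TLB entries?  Global pages
       survive a CR3 switch only while CR4.PGE (bit 7) is set. */
    static bool MustFlushGlobalEntriesOnCr3Load(uint64_t uCr4)
    {
        uint64_t const fCr4Pge = UINT64_C(1) << 7; /* CR4.PGE */
        return (uCr4 & fCr4Pge) == 0;
    }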
*/3372 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */3373 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));3374 AssertRCReturn(rc, rc);3375 /* ignore informational status codes */3376 3377 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);3378 }3379 3380 /*3381 * Switch LDTR for the new task.3382 */3383 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))3384 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);3385 else3386 {3387 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */3388 3389 IEMSELDESC DescNewLdt;3390 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);3391 if (rcStrict != VINF_SUCCESS)3392 {3393 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,3394 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));3395 return rcStrict;3396 }3397 if ( !DescNewLdt.Legacy.Gen.u1Present3398 || DescNewLdt.Legacy.Gen.u1DescType3399 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)3400 {3401 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,3402 uNewLdt, DescNewLdt.Legacy.u));3403 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);3404 }3405 3406 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;3407 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;3408 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);3409 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);3410 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);3411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3412 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;3413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));3414 }3415 3416 IEMSELDESC DescSS;3417 if (IEM_IS_V86_MODE(pVCpu))3418 {3419 IEM_SET_CPL(pVCpu, 3);3420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);3421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);3422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);3423 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);3424 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);3425 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);3426 3427 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */3428 DescSS.Legacy.u = 0;3429 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;3430 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;3431 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;3432 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);3433 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);3434 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;3435 DescSS.Legacy.Gen.u2Dpl = 3;3436 }3437 else3438 {3439 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);3440 3441 /*3442 * Load the stack segment for the new task.3443 */3444 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))3445 {3446 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));3447 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3448 }3449 3450 /* Fetch the descriptor. */3451 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);3452 if (rcStrict != VINF_SUCCESS)3453 {3454 Log(("iemTaskSwitch: failed to fetch SS. 
uNewSS=%#x rc=%Rrc\n", uNewSS,3455 VBOXSTRICTRC_VAL(rcStrict)));3456 return rcStrict;3457 }3458 3459 /* SS must be a data segment and writable. */3460 if ( !DescSS.Legacy.Gen.u1DescType3461 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)3462 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))3463 {3464 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",3465 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));3466 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3467 }3468 3469 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */3470 if ( (uNewSS & X86_SEL_RPL) != uNewCpl3471 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)3472 {3473 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,3474 uNewCpl));3475 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3476 }3477 3478 /* Is it there? */3479 if (!DescSS.Legacy.Gen.u1Present)3480 {3481 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));3482 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3483 }3484 3485 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);3486 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);3487 3488 /* Set the accessed bit before committing the result into SS. */3489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3490 {3491 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);3492 if (rcStrict != VINF_SUCCESS)3493 return rcStrict;3494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3495 }3496 3497 /* Commit SS. */3498 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3499 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;3500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);3501 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;3502 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;3503 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;3504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));3505 3506 /* CPL has changed, update IEM before loading rest of segments. */3507 IEM_SET_CPL(pVCpu, uNewCpl);3508 3509 /*3510 * Load the data segments for the new task.3511 */3512 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);3513 if (rcStrict != VINF_SUCCESS)3514 return rcStrict;3515 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);3516 if (rcStrict != VINF_SUCCESS)3517 return rcStrict;3518 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);3519 if (rcStrict != VINF_SUCCESS)3520 return rcStrict;3521 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);3522 if (rcStrict != VINF_SUCCESS)3523 return rcStrict;3524 3525 /*3526 * Load the code segment for the new task.3527 */3528 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))3529 {3530 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));3531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3532 }3533 3534 /* Fetch the descriptor. */3535 IEMSELDESC DescCS;3536 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);3537 if (rcStrict != VINF_SUCCESS)3538 {3539 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));3540 return rcStrict;3541 }3542 3543 /* CS must be a code segment. 
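As a hedged illustration (editor's aside, not part of the change): the descriptor test just below amounts to requiring the S bit (code/data, not system) plus bit 3 of the 4-bit type field (code). Assuming the usual x86 type encoding where X86_SEL_TYPE_CODE is 0x8:

    /* Sketch: does this descriptor describe a code segment? */
    static bool IsCodeSegmentType(bool fCodeOrDataDesc /* S bit */, unsigned uType4 /* 4-bit type */)
    {
        return fCodeOrDataDesc && (uType4 & 0x8) != 0;
    }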
*/3544 if ( !DescCS.Legacy.Gen.u1DescType3545 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3546 {3547 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,3548 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));3549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3550 }3551 3552 /* For conforming CS, DPL must be less than or equal to the RPL. */3553 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3554 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))3555 {3556 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,3557 DescCS.Legacy.Gen.u2Dpl));3558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3559 }3560 3561 /* For non-conforming CS, DPL must match RPL. */3562 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3563 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))3564 {3565 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,3566 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));3567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3568 }3569 3570 /* Is it there? */3571 if (!DescCS.Legacy.Gen.u1Present)3572 {3573 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));3574 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3575 }3576 3577 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);3578 u64Base = X86DESC_BASE(&DescCS.Legacy);3579 3580 /* Set the accessed bit before committing the result into CS. */3581 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3582 {3583 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);3584 if (rcStrict != VINF_SUCCESS)3585 return rcStrict;3586 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3587 }3588 3589 /* Commit CS. */3590 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3591 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;3592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);3593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;3594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;3595 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;3596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));3597 }3598 3599 /* Make sure the CPU mode is correct. */3600 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);3601 if (fExecNew != pVCpu->iem.s.fExec)3602 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));3603 pVCpu->iem.s.fExec = fExecNew;3604 3605 /** @todo Debug trap. */3606 if (fIsNewTss386 && fNewDebugTrap)3607 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));3608 3609 /*3610 * Construct the error code masks based on what caused this task switch.3611 * See Intel Instruction reference for INT.3612 */3613 uint16_t uExt;3614 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT3615 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3616 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))3617 uExt = 1;3618 else3619 uExt = 0;3620 3621 /*3622 * Push any error code on to the new stack.3623 */3624 if (fFlags & IEM_XCPT_FLAGS_ERR)3625 {3626 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);3627 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3628 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;3629 3630 /* Check that there is sufficient space on the stack. 
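The bounds test that follows (and the similar one in the interrupt path later on) treats normal and expand-down stack segments differently: a normal segment accepts offsets [0, limit], an expand-down one accepts (limit, 0xFFFF] or (limit, 0xFFFFFFFF] depending on the D/B bit. A self-contained sketch of the same condition (editor's aside, not part of the change):

    #include <cstdint>

    /* Sketch: does pushing cbFrame bytes at the current (E)SP fit the stack segment? */
    static bool StackPushFits(uint32_t uEsp, uint32_t cbFrame, uint32_t cbLimit,
                              bool fExpandDown, bool fDefBig)
    {
        if (!fExpandDown)
            return uEsp - 1 <= cbLimit && uEsp >= cbFrame;            /* offsets [0, limit] */
        uint32_t const uMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
        return uEsp - 1 <= uMax && uEsp - cbFrame >= cbLimit + 1;     /* offsets (limit, max] */
    }

This is simply the negation of the two #SS conditions in the code that follows.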
*/3631 /** @todo Factor out segment limit checking for normal/expand down segments3632 * into a separate function. */3633 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3634 {3635 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS3636 || pVCpu->cpum.GstCtx.esp < cbStackFrame)3637 {3638 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3639 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",3640 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3641 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3642 }3643 }3644 else3645 {3646 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))3647 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))3648 {3649 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",3650 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3651 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3652 }3653 }3654 3655 3656 if (fIsNewTss386)3657 rcStrict = iemMemStackPushU32(pVCpu, uErr);3658 else3659 rcStrict = iemMemStackPushU16(pVCpu, uErr);3660 if (rcStrict != VINF_SUCCESS)3661 {3662 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",3663 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));3664 return rcStrict;3665 }3666 }3667 3668 /* Check the new EIP against the new CS limit. */3669 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)3670 {3671 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",3672 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));3673 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3674 return iemRaiseGeneralProtectionFault(pVCpu, uExt);3675 }3676 3677 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,3678 pVCpu->cpum.GstCtx.ss.Sel));3679 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;3680 }3681 3682 3683 /**3684 * Implements exceptions and interrupts for protected mode.3685 *3686 * @returns VBox strict status code.3687 * @param pVCpu The cross context virtual CPU structure of the calling thread.3688 * @param cbInstr The number of bytes to offset rIP by in the return3689 * address.3690 * @param u8Vector The interrupt / exception vector number.3691 * @param fFlags The flags.3692 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.3693 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.3694 */3695 static VBOXSTRICTRC3696 iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,3697 uint8_t cbInstr,3698 uint8_t u8Vector,3699 uint32_t fFlags,3700 uint16_t uErr,3701 uint64_t uCr2) RT_NOEXCEPT3702 {3703 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);3704 3705 /*3706 * Read the IDT entry.3707 */3708 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)3709 {3710 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));3711 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3712 }3713 X86DESC Idte;3714 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,3715 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);3716 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))3717 {3718 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! 
vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));3719 return rcStrict;3720 }3721 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",3722 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,3723 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,3724 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));3725 3726 /*3727 * Check the descriptor type, DPL and such.3728 * ASSUMES this is done in the same order as described for call-gate calls.3729 */3730 if (Idte.Gate.u1DescType)3731 {3732 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3733 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3734 }3735 bool fTaskGate = false;3736 uint8_t f32BitGate = true;3737 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;3738 switch (Idte.Gate.u4Type)3739 {3740 case X86_SEL_TYPE_SYS_UNDEFINED:3741 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:3742 case X86_SEL_TYPE_SYS_LDT:3743 case X86_SEL_TYPE_SYS_286_TSS_BUSY:3744 case X86_SEL_TYPE_SYS_286_CALL_GATE:3745 case X86_SEL_TYPE_SYS_UNDEFINED2:3746 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:3747 case X86_SEL_TYPE_SYS_UNDEFINED3:3748 case X86_SEL_TYPE_SYS_386_TSS_BUSY:3749 case X86_SEL_TYPE_SYS_386_CALL_GATE:3750 case X86_SEL_TYPE_SYS_UNDEFINED4:3751 {3752 /** @todo check what actually happens when the type is wrong...3753 * esp. call gates. */3754 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3755 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3756 }3757 3758 case X86_SEL_TYPE_SYS_286_INT_GATE:3759 f32BitGate = false;3760 RT_FALL_THRU();3761 case X86_SEL_TYPE_SYS_386_INT_GATE:3762 fEflToClear |= X86_EFL_IF;3763 break;3764 3765 case X86_SEL_TYPE_SYS_TASK_GATE:3766 fTaskGate = true;3767 #ifndef IEM_IMPLEMENTS_TASKSWITCH3768 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));3769 #endif3770 break;3771 3772 case X86_SEL_TYPE_SYS_286_TRAP_GATE:3773 f32BitGate = false;3774 break;3775 case X86_SEL_TYPE_SYS_386_TRAP_GATE:3776 break;3777 3778 IEM_NOT_REACHED_DEFAULT_CASE_RET();3779 }3780 3781 /* Check DPL against CPL if applicable. */3782 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)3783 {3784 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)3785 {3786 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));3787 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3788 }3789 }3790 3791 /* Is it there? */3792 if (!Idte.Gate.u1Present)3793 {3794 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));3795 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3796 }3797 3798 /* Is it a task-gate? */3799 if (fTaskGate)3800 {3801 /*3802 * Construct the error code masks based on what caused this task switch.3803 * See Intel Instruction reference for INT.3804 */3805 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3806 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 
0 : 1;3807 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;3808 RTSEL SelTss = Idte.Gate.u16Sel;3809 3810 /*3811 * Fetch the TSS descriptor in the GDT.3812 */3813 IEMSELDESC DescTSS;3814 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);3815 if (rcStrict != VINF_SUCCESS)3816 {3817 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,3818 VBOXSTRICTRC_VAL(rcStrict)));3819 return rcStrict;3820 }3821 3822 /* The TSS descriptor must be a system segment and be available (not busy). */3823 if ( DescTSS.Legacy.Gen.u1DescType3824 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL3825 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))3826 {3827 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",3828 u8Vector, SelTss, DescTSS.Legacy.au64));3829 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);3830 }3831 3832 /* The TSS must be present. */3833 if (!DescTSS.Legacy.Gen.u1Present)3834 {3835 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));3836 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);3837 }3838 3839 /* Do the actual task switch. */3840 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,3841 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,3842 fFlags, uErr, uCr2, SelTss, &DescTSS);3843 }3844 3845 /* A null CS is bad. */3846 RTSEL NewCS = Idte.Gate.u16Sel;3847 if (!(NewCS & X86_SEL_MASK_OFF_RPL))3848 {3849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));3850 return iemRaiseGeneralProtectionFault0(pVCpu);3851 }3852 3853 /* Fetch the descriptor for the new CS. */3854 IEMSELDESC DescCS;3855 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */3856 if (rcStrict != VINF_SUCCESS)3857 {3858 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));3859 return rcStrict;3860 }3861 3862 /* Must be a code segment. */3863 if (!DescCS.Legacy.Gen.u1DescType)3864 {3865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3866 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3867 }3868 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3869 {3870 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3871 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3872 }3873 3874 /* Don't allow lowering the privilege level. */3875 /** @todo Does the lowering of privileges apply to software interrupts3876 * only? This has bearings on the more-privileged or3877 * same-privilege stack behavior further down. A testcase would3878 * be nice. */3879 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))3880 {3881 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",3882 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));3883 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3884 }3885 3886 /* Make sure the selector is present. 
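The error codes assembled in this function -- (SelTss & X86_SEL_MASK_OFF_RPL) | uExt above, and X86_TRAP_ERR_IDT | (vector << X86_TRAP_ERR_SEL_SHIFT) elsewhere -- follow the standard selector error-code layout: EXT in bit 0, IDT in bit 1, TI in bit 2, and the index from bit 3 up. As a sketch (editor's aside, assuming that standard layout):

    #include <cstdint>

    /* Sketch: compose a selector/IDT error code from its parts. */
    static uint16_t MakeSelectorErrorCode(uint16_t uIndex, bool fIdt, bool fLdt, bool fExt)
    {
        return (uint16_t)(  (uint32_t)uIndex << 3
                          | (fLdt ? 4u : 0u)
                          | (fIdt ? 2u : 0u)
                          | (fExt ? 1u : 0u));
    }

For IDT-sourced faults the index is the vector number and the TI bit is not used.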
*/3887 if (!DescCS.Legacy.Gen.u1Present)3888 {3889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));3890 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);3891 }3892 3893 #ifdef LOG_ENABLED3894 /* If software interrupt, try decode it if logging is enabled and such. */3895 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3896 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))3897 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);3898 #endif3899 3900 /* Check the new EIP against the new CS limit. */3901 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE3902 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE3903 ? Idte.Gate.u16OffsetLow3904 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);3905 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);3906 if (uNewEip > cbLimitCS)3907 {3908 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",3909 u8Vector, uNewEip, cbLimitCS, NewCS));3910 return iemRaiseGeneralProtectionFault(pVCpu, 0);3911 }3912 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));3913 3914 /* Calc the flag image to push. */3915 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);3916 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))3917 fEfl &= ~X86_EFL_RF;3918 else3919 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */3920 3921 /* From V8086 mode only go to CPL 0. */3922 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF3923 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;3924 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */3925 {3926 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));3927 return iemRaiseGeneralProtectionFault(pVCpu, 0);3928 }3929 3930 /*3931 * If the privilege level changes, we need to get a new stack from the TSS.3932 * This in turns means validating the new SS and ESP...3933 */3934 if (uNewCpl != IEM_GET_CPL(pVCpu))3935 {3936 RTSEL NewSS;3937 uint32_t uNewEsp;3938 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);3939 if (rcStrict != VINF_SUCCESS)3940 return rcStrict;3941 3942 IEMSELDESC DescSS;3943 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);3944 if (rcStrict != VINF_SUCCESS)3945 return rcStrict;3946 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */3947 if (!DescSS.Legacy.Gen.u1DefBig)3948 {3949 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));3950 uNewEsp = (uint16_t)uNewEsp;3951 }3952 3953 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));3954 3955 /* Check that there is sufficient space for the stack frame. */3956 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3957 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)3958 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate3959 : (fFlags & IEM_XCPT_FLAGS_ERR ? 
20 : 18) << f32BitGate;3960 3961 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3962 {3963 if ( uNewEsp - 1 > cbLimitSS3964 || uNewEsp < cbStackFrame)3965 {3966 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",3967 u8Vector, NewSS, uNewEsp, cbStackFrame));3968 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3969 }3970 }3971 else3972 {3973 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)3974 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))3975 {3976 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",3977 u8Vector, NewSS, uNewEsp, cbStackFrame));3978 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3979 }3980 }3981 3982 /*3983 * Start making changes.3984 */3985 3986 /* Set the new CPL so that stack accesses use it. */3987 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);3988 IEM_SET_CPL(pVCpu, uNewCpl);3989 3990 /* Create the stack frame. */3991 uint8_t bUnmapInfoStackFrame;3992 RTPTRUNION uStackFrame;3993 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,3994 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),3995 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */3996 if (rcStrict != VINF_SUCCESS)3997 return rcStrict;3998 if (f32BitGate)3999 {4000 if (fFlags & IEM_XCPT_FLAGS_ERR)4001 *uStackFrame.pu32++ = uErr;4002 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4003 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4004 uStackFrame.pu32[2] = fEfl;4005 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;4006 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;4007 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));4008 if (fEfl & X86_EFL_VM)4009 {4010 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;4011 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;4012 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;4013 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;4014 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;4015 }4016 }4017 else4018 {4019 if (fFlags & IEM_XCPT_FLAGS_ERR)4020 *uStackFrame.pu16++ = uErr;4021 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;4022 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4023 uStackFrame.pu16[2] = fEfl;4024 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;4025 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;4026 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));4027 if (fEfl & X86_EFL_VM)4028 {4029 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;4030 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;4031 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;4032 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;4033 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;4034 }4035 }4036 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);4037 if (rcStrict != VINF_SUCCESS)4038 return rcStrict;4039 4040 /* Mark the selectors 'accessed' (hope this is the correct time). */4041 /** @todo testcase: excatly _when_ are the accessed bits set - before or4042 * after pushing the stack frame? (Write protect the gdt + stack to4043 * find out.) 
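For reference (editor's aside; the internals of iemMemMarkSelDescAccessed are assumed rather than shown here): "marking a selector accessed" means setting bit 0 of the descriptor's 4-bit type field, i.e. bit 40 of the 8-byte GDT/LDT entry. A bare-bones sketch against a raw descriptor image:

    #include <cstdint>

    /* Sketch: set X86_SEL_TYPE_ACCESSED in a raw 8-byte descriptor image. */
    static void MarkDescriptorAccessed(uint8_t *pabDesc)
    {
        pabDesc[5] |= UINT8_C(0x01); /* type field is bits 40..43; bit 40 = accessed */
    }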
         */
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /*
         * Start committing the register changes (joins with the DPL=CPL branch).
         */
        pVCpu->cpum.GstCtx.ss.Sel      = NewSS;
        pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
        pVCpu->cpum.GstCtx.ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
        pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
        /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
         *        16-bit handler, the high word of ESP remains unchanged (i.e. only
         *        SP is loaded).
         *        Need to check the other combinations too:
         *        - 16-bit TSS, 32-bit handler
         *        - 32-bit TSS, 16-bit handler */
        if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
            pVCpu->cpum.GstCtx.sp  = (uint16_t)(uNewEsp - cbStackFrame);
        else
            pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;

        if (fEfl & X86_EFL_VM)
        {
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
        }
    }
    /*
     * Same privilege, no stack change and smaller stack frame.
     */
    else
    {
        uint64_t      uNewRsp;
        uint8_t       bUnmapInfoStackFrame;
        RTPTRUNION    uStackFrame;
        uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
        rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
                                               &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        if (f32BitGate)
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu32++ = uErr;
            uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
            uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
            uStackFrame.pu32[2] = fEfl;
        }
        else
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu16++ = uErr;
            uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
            uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
            uStackFrame.pu16[2] = fEfl;
        }
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /* Mark the CS selector as 'accessed'. */
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /*
         * Start committing the register changes (joins with the other branch).
         */
        pVCpu->cpum.GstCtx.rsp = uNewRsp;
    }

    /* ... register committing continues. 
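One detail of the commit that follows is easy to miss (editor's aside, not part of the change): the CS value loaded for the handler takes the selector from the gate but forces its RPL to the privilege level the handler will run at:

    #include <cstdint>

    /* Sketch: gate selector with RPL replaced by the new CPL (X86_SEL_RPL is the low 2 bits). */
    static uint16_t HandlerCsValue(uint16_t uSelFromGate, uint8_t uNewCpl)
    {
        return (uint16_t)((uSelFromGate & ~UINT16_C(3)) | uNewCpl);
    }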
*/4138 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4139 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4140 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;4141 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;4142 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);4143 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);4144 4145 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */4146 fEfl &= ~fEflToClear;4147 IEMMISC_SET_EFL(pVCpu, fEfl);4148 4149 if (fFlags & IEM_XCPT_FLAGS_CR2)4150 pVCpu->cpum.GstCtx.cr2 = uCr2;4151 4152 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)4153 iemRaiseXcptAdjustState(pVCpu, u8Vector);4154 4155 /* Make sure the execution flags are correct. */4156 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);4157 if (fExecNew != pVCpu->iem.s.fExec)4158 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",4159 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));4160 pVCpu->iem.s.fExec = fExecNew;4161 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);4162 4163 /*4164 * Deal with debug events that follows the exception and clear inhibit flags.4165 */4166 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4167 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))4168 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4169 else4170 {4171 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",4172 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));4173 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);4174 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4175 >> CPUMCTX_DBG_HIT_DRX_SHIFT;4176 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4177 return iemRaiseDebugException(pVCpu);4178 }4179 4180 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4181 }4182 4183 4184 /**4185 * Implements exceptions and interrupts for long mode.4186 *4187 * @returns VBox strict status code.4188 * @param pVCpu The cross context virtual CPU structure of the calling thread.4189 * @param cbInstr The number of bytes to offset rIP by in the return4190 * address.4191 * @param u8Vector The interrupt / exception vector number.4192 * @param fFlags The flags.4193 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4194 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4195 */4196 static VBOXSTRICTRC4197 iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,4198 uint8_t cbInstr,4199 uint8_t u8Vector,4200 uint32_t fFlags,4201 uint16_t uErr,4202 uint64_t uCr2) RT_NOEXCEPT4203 {4204 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4205 4206 /*4207 * Read the IDT entry.4208 */4209 uint16_t offIdt = (uint16_t)u8Vector << 4;4210 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)4211 {4212 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));4213 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4214 }4215 X86DESC64 Idte;4216 #ifdef _MSC_VER /* Shut up silly compiler warning. 
*/4217 Idte.au64[0] = 0;4218 Idte.au64[1] = 0;4219 #endif4220 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);4221 if (RT_LIKELY(rcStrict == VINF_SUCCESS))4222 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);4223 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))4224 {4225 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));4226 return rcStrict;4227 }4228 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",4229 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,4230 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));4231 4232 /*4233 * Check the descriptor type, DPL and such.4234 * ASSUMES this is done in the same order as described for call-gate calls.4235 */4236 if (Idte.Gate.u1DescType)4237 {4238 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4239 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4240 }4241 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;4242 switch (Idte.Gate.u4Type)4243 {4244 case AMD64_SEL_TYPE_SYS_INT_GATE:4245 fEflToClear |= X86_EFL_IF;4246 break;4247 case AMD64_SEL_TYPE_SYS_TRAP_GATE:4248 break;4249 4250 default:4251 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4252 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4253 }4254 4255 /* Check DPL against CPL if applicable. */4256 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)4257 {4258 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)4259 {4260 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));4261 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4262 }4263 }4264 4265 /* Is it there? */4266 if (!Idte.Gate.u1Present)4267 {4268 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));4269 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4270 }4271 4272 /* A null CS is bad. */4273 RTSEL NewCS = Idte.Gate.u16Sel;4274 if (!(NewCS & X86_SEL_MASK_OFF_RPL))4275 {4276 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));4277 return iemRaiseGeneralProtectionFault0(pVCpu);4278 }4279 4280 /* Fetch the descriptor for the new CS. */4281 IEMSELDESC DescCS;4282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);4283 if (rcStrict != VINF_SUCCESS)4284 {4285 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));4286 return rcStrict;4287 }4288 4289 /* Must be a 64-bit code segment. 
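A hedged restatement of the checks that follow (editor's aside): in long mode the gate must point at a code descriptor with L=1 and D=0; anything else is rejected with #GP. As a sketch:

    /* Sketch: acceptable target CS for a 64-bit interrupt/trap gate. */
    static bool IsLongModeHandlerCs(bool fCodeOrDataDesc /* S */, unsigned uType4, bool fLong, bool fDefBig)
    {
        return fCodeOrDataDesc && (uType4 & 0x8) != 0 && fLong && !fDefBig;
    }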
*/4290 if (!DescCS.Long.Gen.u1DescType)4291 {4292 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));4293 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4294 }4295 if ( !DescCS.Long.Gen.u1Long4296 || DescCS.Long.Gen.u1DefBig4297 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )4298 {4299 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",4300 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));4301 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4302 }4303 4304 /* Don't allow lowering the privilege level. For non-conforming CS4305 selectors, the CS.DPL sets the privilege level the trap/interrupt4306 handler runs at. For conforming CS selectors, the CPL remains4307 unchanged, but the CS.DPL must be <= CPL. */4308 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched4309 * when CPU in Ring-0. Result \#GP? */4310 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))4311 {4312 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",4313 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));4314 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4315 }4316 4317 4318 /* Make sure the selector is present. */4319 if (!DescCS.Legacy.Gen.u1Present)4320 {4321 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));4322 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);4323 }4324 4325 /* Check that the new RIP is canonical. */4326 uint64_t const uNewRip = Idte.Gate.u16OffsetLow4327 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)4328 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);4329 if (!IEM_IS_CANONICAL(uNewRip))4330 {4331 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));4332 return iemRaiseGeneralProtectionFault0(pVCpu);4333 }4334 4335 /*4336 * If the privilege level changes or if the IST isn't zero, we need to get4337 * a new stack from the TSS.4338 */4339 uint64_t uNewRsp;4340 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF4341 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;4342 if ( uNewCpl != IEM_GET_CPL(pVCpu)4343 || Idte.Gate.u3IST != 0)4344 {4345 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);4346 if (rcStrict != VINF_SUCCESS)4347 return rcStrict;4348 }4349 else4350 uNewRsp = pVCpu->cpum.GstCtx.rsp;4351 uNewRsp &= ~(uint64_t)0xf;4352 4353 /*4354 * Calc the flag image to push.4355 */4356 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);4357 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))4358 fEfl &= ~X86_EFL_RF;4359 else4360 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */4361 4362 /*4363 * Start making changes.4364 */4365 /* Set the new CPL so that stack accesses use it. */4366 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);4367 IEM_SET_CPL(pVCpu, uNewCpl);4368 /** @todo Setting CPL this early seems wrong as it would affect and errors we4369 * raise accessing the stack and (?) GDT/LDT... */4370 4371 /* Create the stack frame. 
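For orientation (editor's aside): the frame built below consists of 64-bit slots for SS, RSP, RFLAGS, CS and RIP plus an optional error code, carved out of a stack pointer that was aligned down to 16 bytes a few lines up (the "&= ~(uint64_t)0xf"). The resulting RSP can be sketched as:

    #include <cstdint>

    /* Sketch: new RSP after aligning and reserving the long-mode exception frame. */
    static uint64_t CalcLongModeXcptRsp(uint64_t uRspIn, bool fPushErrCd)
    {
        uint64_t const uRspAligned = uRspIn & ~UINT64_C(0xf);
        return uRspAligned - sizeof(uint64_t) * (5 + (fPushErrCd ? 1 : 0));
    }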
*/4372 uint8_t bUnmapInfoStackFrame;4373 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));4374 RTPTRUNION uStackFrame;4375 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,4376 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */4377 if (rcStrict != VINF_SUCCESS)4378 return rcStrict;4379 4380 if (fFlags & IEM_XCPT_FLAGS_ERR)4381 *uStackFrame.pu64++ = uErr;4382 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;4383 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */4384 uStackFrame.pu64[2] = fEfl;4385 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;4386 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;4387 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);4388 if (rcStrict != VINF_SUCCESS)4389 return rcStrict;4390 4391 /* Mark the CS selectors 'accessed' (hope this is the correct time). */4392 /** @todo testcase: excatly _when_ are the accessed bits set - before or4393 * after pushing the stack frame? (Write protect the gdt + stack to4394 * find out.) */4395 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4396 {4397 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4398 if (rcStrict != VINF_SUCCESS)4399 return rcStrict;4400 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4401 }4402 4403 /*4404 * Start comitting the register changes.4405 */4406 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the4407 * hidden registers when interrupting 32-bit or 16-bit code! */4408 if (uNewCpl != uOldCpl)4409 {4410 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;4411 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;4412 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;4413 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;4414 pVCpu->cpum.GstCtx.ss.u64Base = 0;4415 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;4416 }4417 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;4418 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4419 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4420 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;4421 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);4422 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);4423 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);4424 pVCpu->cpum.GstCtx.rip = uNewRip;4425 4426 fEfl &= ~fEflToClear;4427 IEMMISC_SET_EFL(pVCpu, fEfl);4428 4429 if (fFlags & IEM_XCPT_FLAGS_CR2)4430 pVCpu->cpum.GstCtx.cr2 = uCr2;4431 4432 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)4433 iemRaiseXcptAdjustState(pVCpu, u8Vector);4434 4435 iemRecalcExecModeAndCplAndAcFlags(pVCpu);4436 4437 /*4438 * Deal with debug events that follows the exception and clear inhibit flags.4439 */4440 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4441 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))4442 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4443 else4444 {4445 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",4446 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));4447 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);4448 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4449 >> CPUMCTX_DBG_HIT_DRX_SHIFT;4450 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | 
CPUMCTX_INHIBIT_SHADOW);4451 return iemRaiseDebugException(pVCpu);4452 }4453 4454 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4455 }4456 4457 4458 /**4459 * Implements exceptions and interrupts.4460 *4461 * All exceptions and interrupts goes thru this function!4462 *4463 * @returns VBox strict status code.4464 * @param pVCpu The cross context virtual CPU structure of the calling thread.4465 * @param cbInstr The number of bytes to offset rIP by in the return4466 * address.4467 * @param u8Vector The interrupt / exception vector number.4468 * @param fFlags The flags.4469 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4470 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4471 */4472 VBOXSTRICTRC4473 iemRaiseXcptOrInt(PVMCPUCC pVCpu,4474 uint8_t cbInstr,4475 uint8_t u8Vector,4476 uint32_t fFlags,4477 uint16_t uErr,4478 uint64_t uCr2) RT_NOEXCEPT4479 {4480 /*4481 * Get all the state that we might need here.4482 */4483 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4484 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4485 4486 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */4487 /*4488 * Flush prefetch buffer4489 */4490 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;4491 #endif4492 4493 /*4494 * Perform the V8086 IOPL check and upgrade the fault without nesting.4495 */4496 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM4497 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 34498 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT4499 | IEM_XCPT_FLAGS_BP_INSTR4500 | IEM_XCPT_FLAGS_ICEBP_INSTR4501 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT4502 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )4503 {4504 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));4505 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4506 u8Vector = X86_XCPT_GP;4507 uErr = 0;4508 }4509 4510 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);4511 #ifdef DBGFTRACE_ENABLED4512 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",4513 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,4514 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);4515 #endif4516 4517 /*4518 * Check if DBGF wants to intercept the exception.4519 */4520 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))4521 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )4522 { /* likely */ }4523 else4524 {4525 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),4526 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);4527 if (rcStrict != VINF_SUCCESS)4528 return rcStrict;4529 }4530 4531 /*4532 * Evaluate whether NMI blocking should be in effect.4533 * Normally, NMI blocking is in effect whenever we inject an NMI.4534 */4535 bool fBlockNmi = u8Vector == X86_XCPT_NMI4536 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);4537 4538 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4539 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4540 {4541 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);4542 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4543 return rcStrict0;4544 4545 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. 
*/4546 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)4547 {4548 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));4549 fBlockNmi = false;4550 }4551 }4552 #endif4553 4554 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM4555 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))4556 {4557 /*4558 * If the event is being injected as part of VMRUN, it isn't subject to event4559 * intercepts in the nested-guest. However, secondary exceptions that occur4560 * during injection of any event -are- subject to exception intercepts.4561 *4562 * See AMD spec. 15.20 "Event Injection".4563 */4564 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)4565 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;4566 else4567 {4568 /*4569 * Check and handle if the event being raised is intercepted.4570 */4571 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4572 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)4573 return rcStrict0;4574 }4575 }4576 #endif4577 4578 /*4579 * Set NMI blocking if necessary.4580 */4581 if (fBlockNmi)4582 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);4583 4584 /*4585 * Do recursion accounting.4586 */4587 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;4588 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;4589 if (pVCpu->iem.s.cXcptRecursions == 0)4590 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",4591 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));4592 else4593 {4594 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",4595 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,4596 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));4597 4598 if (pVCpu->iem.s.cXcptRecursions >= 4)4599 {4600 #ifdef DEBUG_bird4601 AssertFailed();4602 #endif4603 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));4604 }4605 4606 /*4607 * Evaluate the sequence of recurring events.4608 */4609 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,4610 NULL /* pXcptRaiseInfo */);4611 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)4612 { /* likely */ }4613 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)4614 {4615 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));4616 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4617 u8Vector = X86_XCPT_DF;4618 uErr = 0;4619 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4620 /* VMX nested-guest #DF intercept needs to be checked here. */4621 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4622 {4623 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);4624 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4625 return rcStrict0;4626 }4627 #endif4628 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */4629 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))4630 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);4631 }4632 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)4633 {4634 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));4635 return iemInitiateCpuShutdown(pVCpu);4636 }4637 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)4638 {4639 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. 
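           I.e. while the guest is in SVM nested or VMX non-root mode we simply keep
           going, whereas outside nested hardware virtualization the VM is failed with
           VERR_EM_GUEST_CPU_HANG below.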
*/4640 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));4641 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))4642 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))4643 return VERR_EM_GUEST_CPU_HANG;4644 }4645 else4646 {4647 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",4648 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));4649 return VERR_IEM_IPE_9;4650 }4651 4652 /*4653 * The 'EXT' bit is set when an exception occurs during deliver of an external4654 * event (such as an interrupt or earlier exception)[1]. Privileged software4655 * exception (INT1) also sets the EXT bit[2]. Exceptions generated by software4656 * interrupts and INTO, INT3 instructions, the 'EXT' bit will not be set.4657 *4658 * [1] - Intel spec. 6.13 "Error Code"4659 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".4660 * [3] - Intel Instruction reference for INT n.4661 */4662 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))4663 && (fFlags & IEM_XCPT_FLAGS_ERR)4664 && u8Vector != X86_XCPT_PF4665 && u8Vector != X86_XCPT_DF)4666 {4667 uErr |= X86_TRAP_ERR_EXTERNAL;4668 }4669 }4670 4671 pVCpu->iem.s.cXcptRecursions++;4672 pVCpu->iem.s.uCurXcpt = u8Vector;4673 pVCpu->iem.s.fCurXcpt = fFlags;4674 pVCpu->iem.s.uCurXcptErr = uErr;4675 pVCpu->iem.s.uCurXcptCr2 = uCr2;4676 4677 /*4678 * Extensive logging.4679 */4680 #if defined(LOG_ENABLED) && defined(IN_RING3)4681 if (LogIs3Enabled())4682 {4683 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);4684 char szRegs[4096];4685 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),4686 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"4687 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"4688 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"4689 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"4690 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"4691 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"4692 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"4693 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"4694 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"4695 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"4696 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"4697 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"4698 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"4699 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"4700 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"4701 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"4702 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"4703 " efer=%016VR{efer}\n"4704 " pat=%016VR{pat}\n"4705 " sf_mask=%016VR{sf_mask}\n"4706 "krnl_gs_base=%016VR{krnl_gs_base}\n"4707 " lstar=%016VR{lstar}\n"4708 " star=%016VR{star} cstar=%016VR{cstar}\n"4709 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"4710 );4711 4712 char szInstr[256];4713 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,4714 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,4715 
szInstr, sizeof(szInstr), NULL);4716 Log3(("%s%s\n", szRegs, szInstr));4717 }4718 #endif /* LOG_ENABLED */4719 4720 /*4721 * Stats.4722 */4723 uint64_t const uTimestamp = ASMReadTSC();4724 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))4725 {4726 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });4727 EMHistoryAddExit(pVCpu,4728 fFlags & IEM_XCPT_FLAGS_T_EXT_INT4729 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)4730 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),4731 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);4732 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);4733 }4734 else4735 {4736 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))4737 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);4738 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),4739 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);4740 if (fFlags & IEM_XCPT_FLAGS_ERR)4741 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);4742 if (fFlags & IEM_XCPT_FLAGS_CR2)4743 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);4744 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);4745 }4746 4747 /*4748 * Hack alert! Convert incoming debug events to slient on Intel.4749 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.4750 */4751 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4752 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4753 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))4754 { /* ignore */ }4755 else4756 {4757 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",4758 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));4759 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)4760 | CPUMCTX_DBG_HIT_DRX_SILENT;4761 }4762 4763 /*4764 * #PF's implies a INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)4765 * to ensure that a stale TLB or paging cache entry will only cause one4766 * spurious #PF.4767 */4768 if ( u8Vector == X86_XCPT_PF4769 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))4770 IEMTlbInvalidatePage(pVCpu, uCr2);4771 4772 /*4773 * Call the mode specific worker function.4774 */4775 VBOXSTRICTRC rcStrict;4776 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))4777 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4778 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)4779 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4780 else4781 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4782 4783 /* Flush the prefetch buffer. */4784 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));4785 4786 /*4787 * Unwind.4788 */4789 pVCpu->iem.s.cXcptRecursions--;4790 pVCpu->iem.s.uCurXcpt = uPrevXcpt;4791 pVCpu->iem.s.fCurXcpt = fPrevXcpt;4792 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",4793 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,4794 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));4795 return rcStrict;4796 }4797 4798 #ifdef IEM_WITH_SETJMP4799 /**4800 * See iemRaiseXcptOrInt. 
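 * The strict status code from iemRaiseXcptOrInt is handed to IEM_DO_LONGJMP,
 * unwinding straight back to the active setjmp point.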
Will not return.4801 */4802 DECL_NO_RETURN(void)4803 iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,4804 uint8_t cbInstr,4805 uint8_t u8Vector,4806 uint32_t fFlags,4807 uint16_t uErr,4808 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP4809 {4810 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4811 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));4812 }4813 #endif4814 4815 4816 /** \#DE - 00. */4817 VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT4818 {4819 if (GCMIsInterceptingXcptDE(pVCpu))4820 {4821 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);4822 if (rc == VINF_SUCCESS)4823 {4824 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));4825 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause a instruction restart */4826 }4827 }4828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4829 }4830 4831 4832 #ifdef IEM_WITH_SETJMP4833 /** \#DE - 00. */4834 DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4835 {4836 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4837 }4838 #endif4839 4840 4841 /** \#DB - 01.4842 * @note This automatically clear DR7.GD. */4843 VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT4844 {4845 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */4846 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;4847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);4848 }4849 4850 4851 /** \#BR - 05. */4852 VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT4853 {4854 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4855 }4856 4857 4858 /** \#UD - 06. */4859 VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT4860 {4861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4862 }4863 4864 4865 #ifdef IEM_WITH_SETJMP4866 /** \#UD - 06. */4867 DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4868 {4869 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4870 }4871 #endif4872 4873 4874 /** \#NM - 07. */4875 VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT4876 {4877 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4878 }4879 4880 4881 #ifdef IEM_WITH_SETJMP4882 /** \#NM - 07. */4883 DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4884 {4885 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4886 }4887 #endif4888 4889 4890 /** \#TS(err) - 0a. */4891 VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4892 {4893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4894 }4895 4896 4897 /** \#TS(tr) - 0a. */4898 VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT4899 {4900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4901 pVCpu->cpum.GstCtx.tr.Sel, 0);4902 }4903 4904 4905 /** \#TS(0) - 0a. */4906 VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4907 {4908 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4909 0, 0);4910 }4911 4912 4913 /** \#TS(err) - 0a. 
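 * The low word of the error code is the selector with the RPL bits masked off
 * (X86_SEL_MASK_OFF_RPL); the EXT bit is only ORed in by iemRaiseXcptOrInt when
 * the fault is raised while another event is being delivered.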
*/4914 VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4915 {4916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4917 uSel & X86_SEL_MASK_OFF_RPL, 0);4918 }4919 4920 4921 /** \#NP(err) - 0b. */4922 VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4923 {4924 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4925 }4926 4927 4928 /** \#NP(sel) - 0b. */4929 VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4930 {4931 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4934 uSel & ~X86_SEL_RPL, 0);4935 }4936 4937 4938 /** \#SS(seg) - 0c. */4939 VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4940 {4941 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4942 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4944 uSel & ~X86_SEL_RPL, 0);4945 }4946 4947 4948 /** \#SS(err) - 0c. */4949 VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4950 {4951 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",4952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4954 }4955 4956 4957 /** \#GP(n) - 0d. */4958 VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4959 {4960 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4961 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4962 }4963 4964 4965 /** \#GP(0) - 0d. */4966 VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4967 {4968 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4969 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4970 }4971 4972 #ifdef IEM_WITH_SETJMP4973 /** \#GP(0) - 0d. */4974 DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4975 {4976 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4977 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4978 }4979 #endif4980 4981 4982 /** \#GP(sel) - 0d. */4983 VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT4984 {4985 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",4986 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));4987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4988 Sel & ~X86_SEL_RPL, 0);4989 }4990 4991 4992 /** \#GP(0) - 0d. 
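 * Raised for linear addresses that are not in canonical form, i.e. with 48-bit
 * linear addressing bits 63:48 must be copies of bit 47 -- 0xffff800000000000
 * is fine, while 0x0000800000000000 faults.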
*/4993 VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT4994 {4995 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4997 }4998 4999 5000 /** \#GP(sel) - 0d. */5001 VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5002 {5003 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5004 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5005 NOREF(iSegReg); NOREF(fAccess);5006 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5007 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5008 }5009 5010 #ifdef IEM_WITH_SETJMP5011 /** \#GP(sel) - 0d, longjmp. */5012 DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5013 {5014 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5015 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5016 NOREF(iSegReg); NOREF(fAccess);5017 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5018 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5019 }5020 #endif5021 5022 /** \#GP(sel) - 0d. */5023 VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT5024 {5025 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5027 NOREF(Sel);5028 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5029 }5030 5031 #ifdef IEM_WITH_SETJMP5032 /** \#GP(sel) - 0d, longjmp. */5033 DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP5034 {5035 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",5036 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5037 NOREF(Sel);5038 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5039 }5040 #endif5041 5042 5043 /** \#GP(sel) - 0d. */5044 VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5045 {5046 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5047 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5048 NOREF(iSegReg); NOREF(fAccess);5049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5050 }5051 5052 #ifdef IEM_WITH_SETJMP5053 /** \#GP(sel) - 0d, longjmp. */5054 DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5055 {5056 NOREF(iSegReg); NOREF(fAccess);5057 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5058 }5059 #endif5060 5061 5062 /** \#PF(n) - 0e. 
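 * The error code is assembled from the translation failure below: bit 0 (P) for
 * protection violations as opposed to not-present pages, bit 1 (RW) for writes,
 * bit 2 (US) when running at CPL 3, bit 3 (RSVD) for reserved-bit violations,
 * and bit 4 (ID) for code fetches when PAE and EFER.NXE are enabled.  E.g. a
 * ring-3 write hitting a present read-only page ends up with error code 0x7.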
*/5063 VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT5064 {5065 uint16_t uErr;5066 switch (rc)5067 {5068 case VERR_PAGE_NOT_PRESENT:5069 case VERR_PAGE_TABLE_NOT_PRESENT:5070 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:5071 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:5072 uErr = 0;5073 break;5074 5075 case VERR_RESERVED_PAGE_TABLE_BITS:5076 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;5077 break;5078 5079 default:5080 AssertMsgFailed(("%Rrc\n", rc));5081 RT_FALL_THRU();5082 case VERR_ACCESS_DENIED:5083 uErr = X86_TRAP_PF_P;5084 break;5085 }5086 5087 if (IEM_GET_CPL(pVCpu) == 3)5088 uErr |= X86_TRAP_PF_US;5089 5090 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE5091 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)5092 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )5093 uErr |= X86_TRAP_PF_ID;5094 5095 #if 0 /* This is so much non-sense, really. Why was it done like that? */5096 /* Note! RW access callers reporting a WRITE protection fault, will clear5097 the READ flag before calling. So, read-modify-write accesses (RW)5098 can safely be reported as READ faults. */5099 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)5100 uErr |= X86_TRAP_PF_RW;5101 #else5102 if (fAccess & IEM_ACCESS_TYPE_WRITE)5103 {5104 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg5105 /// (regardless of outcome of the comparison in the latter case).5106 //if (!(fAccess & IEM_ACCESS_TYPE_READ))5107 uErr |= X86_TRAP_PF_RW;5108 }5109 #endif5110 5111 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address5112 of the memory operand rather than at the start of it. (Not sure what5113 happens if it crosses a page boundrary.) The current heuristics for5114 this is to report the #PF for the last byte if the access is more than5115 64 bytes. This is probably not correct, but we can work that out later,5116 main objective now is to get FXSAVE to work like for real hardware and5117 make bs3-cpu-basic2 work. */5118 if (cbAccess <= 64)5119 { /* likely*/ }5120 else5121 GCPtrWhere += cbAccess - 1;5122 5123 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,5124 uErr, GCPtrWhere);5125 }5126 5127 #ifdef IEM_WITH_SETJMP5128 /** \#PF(n) - 0e, longjmp. */5129 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,5130 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP5131 {5132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));5133 }5134 #endif5135 5136 5137 /** \#MF(0) - 10. */5138 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT5139 {5140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)5141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5142 5143 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */5144 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);5145 return iemRegUpdateRipAndFinishClearingRF(pVCpu);5146 }5147 5148 #ifdef IEM_WITH_SETJMP5149 /** \#MF(0) - 10, longjmp. */5150 DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5151 {5152 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));5153 }5154 #endif5155 5156 5157 /** \#AC(0) - 11. 
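 * Only taken for unaligned accesses at CPL 3 with both CR0.AM and EFLAGS.AC
 * set; the pushed error code is always zero.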
*/5158 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT5159 {5160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5161 }5162 5163 #ifdef IEM_WITH_SETJMP5164 /** \#AC(0) - 11, longjmp. */5165 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5166 {5167 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));5168 }5169 #endif5170 5171 5172 /** \#XF(0)/\#XM(0) - 19. */5173 VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT5174 {5175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5176 }5177 5178 5179 #ifdef IEM_WITH_SETJMP5180 /** \#XF(0)/\#XM(0) - 19s, longjmp. */5181 DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5182 {5183 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));5184 }5185 #endif5186 5187 5188 /** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */5189 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)5190 {5191 NOREF(cbInstr);5192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5193 }5194 5195 5196 /** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */5197 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)5198 {5199 NOREF(cbInstr);5200 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5201 }5202 5203 5204 /** Accessed via IEMOP_RAISE_INVALID_OPCODE. */5205 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)5206 {5207 NOREF(cbInstr);5208 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5209 }5210 5211 5212 /** @} */5213 5214 /** @name Common opcode decoders.5215 * @{5216 */5217 //#include <iprt/mem.h>5218 5219 /**5220 * Used to add extra details about a stub case.5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.5222 */5223 void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT5224 {5225 #if defined(LOG_ENABLED) && defined(IN_RING3)5226 PVM pVM = pVCpu->CTX_SUFF(pVM);5227 char szRegs[4096];5228 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),5229 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"5230 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"5231 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"5232 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"5233 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"5234 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"5235 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"5236 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"5237 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"5238 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"5239 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"5240 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"5241 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"5242 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"5243 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"5244 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"5245 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} 
esp=%08VR{sysenter_esp}}\n"5246 " efer=%016VR{efer}\n"5247 " pat=%016VR{pat}\n"5248 " sf_mask=%016VR{sf_mask}\n"5249 "krnl_gs_base=%016VR{krnl_gs_base}\n"5250 " lstar=%016VR{lstar}\n"5251 " star=%016VR{star} cstar=%016VR{cstar}\n"5252 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"5253 );5254 5255 char szInstr[256];5256 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,5257 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,5258 szInstr, sizeof(szInstr), NULL);5259 5260 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);5261 #else5262 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);5263 #endif5264 }5265 5266 /** @} */5267 5268 5269 5270 /** @name Register Access.5271 * @{5272 */5273 5274 /**5275 * Adds a 8-bit signed jump offset to RIP/EIP/IP.5276 *5277 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5278 * segment limit.5279 *5280 * @param pVCpu The cross context virtual CPU structure of the calling thread.5281 * @param cbInstr Instruction size.5282 * @param offNextInstr The offset of the next instruction.5283 * @param enmEffOpSize Effective operand size.5284 */5285 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,5286 IEMMODE enmEffOpSize) RT_NOEXCEPT5287 {5288 switch (enmEffOpSize)5289 {5290 case IEMMODE_16BIT:5291 {5292 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;5293 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5294 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))5295 pVCpu->cpum.GstCtx.rip = uNewIp;5296 else5297 return iemRaiseGeneralProtectionFault0(pVCpu);5298 break;5299 }5300 5301 case IEMMODE_32BIT:5302 {5303 Assert(!IEM_IS_64BIT_CODE(pVCpu));5304 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);5305 5306 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;5307 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5308 pVCpu->cpum.GstCtx.rip = uNewEip;5309 else5310 return iemRaiseGeneralProtectionFault0(pVCpu);5311 break;5312 }5313 5314 case IEMMODE_64BIT:5315 {5316 Assert(IEM_IS_64BIT_CODE(pVCpu));5317 5318 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5319 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5320 pVCpu->cpum.GstCtx.rip = uNewRip;5321 else5322 return iemRaiseGeneralProtectionFault0(pVCpu);5323 break;5324 }5325 5326 IEM_NOT_REACHED_DEFAULT_CASE_RET();5327 }5328 5329 #ifndef IEM_WITH_CODE_TLB5330 /* Flush the prefetch buffer. 
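       Resetting cbOpcode to the current instruction length discards any opcode
       bytes that may have been prefetched past the branch (only relevant in the
       non-code-TLB configuration).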
*/5331 pVCpu->iem.s.cbOpcode = cbInstr;5332 #endif5333 5334 /*5335 * Clear RF and finish the instruction (maybe raise #DB).5336 */5337 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5338 }5339 5340 5341 /**5342 * Adds a 16-bit signed jump offset to RIP/EIP/IP.5343 *5344 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5345 * segment limit.5346 *5347 * @returns Strict VBox status code.5348 * @param pVCpu The cross context virtual CPU structure of the calling thread.5349 * @param cbInstr Instruction size.5350 * @param offNextInstr The offset of the next instruction.5351 */5352 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT5353 {5354 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);5355 5356 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;5357 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5358 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))5359 pVCpu->cpum.GstCtx.rip = uNewIp;5360 else5361 return iemRaiseGeneralProtectionFault0(pVCpu);5362 5363 #ifndef IEM_WITH_CODE_TLB5364 /* Flush the prefetch buffer. */5365 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5366 #endif5367 5368 /*5369 * Clear RF and finish the instruction (maybe raise #DB).5370 */5371 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5372 }5373 5374 5375 /**5376 * Adds a 32-bit signed jump offset to RIP/EIP/IP.5377 *5378 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5379 * segment limit.5380 *5381 * @returns Strict VBox status code.5382 * @param pVCpu The cross context virtual CPU structure of the calling thread.5383 * @param cbInstr Instruction size.5384 * @param offNextInstr The offset of the next instruction.5385 * @param enmEffOpSize Effective operand size.5386 */5387 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,5388 IEMMODE enmEffOpSize) RT_NOEXCEPT5389 {5390 if (enmEffOpSize == IEMMODE_32BIT)5391 {5392 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));5393 5394 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;5395 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5396 pVCpu->cpum.GstCtx.rip = uNewEip;5397 else5398 return iemRaiseGeneralProtectionFault0(pVCpu);5399 }5400 else5401 {5402 Assert(enmEffOpSize == IEMMODE_64BIT);5403 5404 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5405 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5406 pVCpu->cpum.GstCtx.rip = uNewRip;5407 else5408 return iemRaiseGeneralProtectionFault0(pVCpu);5409 }5410 5411 #ifndef IEM_WITH_CODE_TLB5412 /* Flush the prefetch buffer. 
*/5413 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5414 #endif5415 5416 /*5417 * Clear RF and finish the instruction (maybe raise #DB).5418 */5419 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5420 }5421 5422 /** @} */5423 5424 5425 /** @name FPU access and helpers.5426 *5427 * @{5428 */5429 5430 /**5431 * Updates the x87.DS and FPUDP registers.5432 *5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.5434 * @param pFpuCtx The FPU context.5435 * @param iEffSeg The effective segment register.5436 * @param GCPtrEff The effective address relative to @a iEffSeg.5437 */5438 DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)5439 {5440 RTSEL sel;5441 switch (iEffSeg)5442 {5443 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;5444 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;5445 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;5446 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;5447 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;5448 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;5449 default:5450 AssertMsgFailed(("%d\n", iEffSeg));5451 sel = pVCpu->cpum.GstCtx.ds.Sel;5452 }5453 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */5454 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))5455 {5456 pFpuCtx->DS = 0;5457 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);5458 }5459 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */5460 {5461 pFpuCtx->DS = sel;5462 pFpuCtx->FPUDP = GCPtrEff;5463 }5464 else5465 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;5466 }5467 5468 5469 /**5470 * Rotates the stack registers in the push direction.5471 *5472 * @param pFpuCtx The FPU context.5473 * @remarks This is a complete waste of time, but fxsave stores the registers in5474 * stack order.5475 */5476 DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)5477 {5478 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;5479 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;5480 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;5481 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;5482 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;5483 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;5484 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;5485 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;5486 pFpuCtx->aRegs[0].r80 = r80Tmp;5487 }5488 5489 5490 /**5491 * Rotates the stack registers in the pop direction.5492 *5493 * @param pFpuCtx The FPU context.5494 * @remarks This is a complete waste of time, but fxsave stores the registers in5495 * stack order.5496 */5497 DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)5498 {5499 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;5500 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;5501 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;5502 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;5503 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;5504 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;5505 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;5506 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;5507 pFpuCtx->aRegs[7].r80 = r80Tmp;5508 }5509 5510 5511 /**5512 * Updates FSW and pushes a FPU result onto the FPU stack if no pending5513 * exception prevents it.5514 *5515 * @param pVCpu The cross context virtual CPU structure of the calling thread.5516 * @param pResult The FPU operation result to push.5517 * @param pFpuCtx The FPU context.5518 */5519 static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT5520 
{5521 /* Update FSW and bail if there are pending exceptions afterwards. */5522 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5523 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5524 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5525 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5526 {5527 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))5528 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",5529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5530 pFpuCtx->FSW = fFsw;5531 return;5532 }5533 5534 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5535 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5536 {5537 /* All is fine, push the actual value. */5538 pFpuCtx->FTW |= RT_BIT(iNewTop);5539 pFpuCtx->aRegs[7].r80 = pResult->r80Result;5540 }5541 else if (pFpuCtx->FCW & X86_FCW_IM)5542 {5543 /* Masked stack overflow, push QNaN. */5544 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5545 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5546 }5547 else5548 {5549 /* Raise stack overflow, don't push anything. */5550 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5551 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5552 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",5553 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5554 return;5555 }5556 5557 fFsw &= ~X86_FSW_TOP_MASK;5558 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5559 pFpuCtx->FSW = fFsw;5560 5561 iemFpuRotateStackPush(pFpuCtx);5562 RT_NOREF(pVCpu);5563 }5564 5565 5566 /**5567 * Stores a result in a FPU register and updates the FSW and FTW.5568 *5569 * @param pVCpu The cross context virtual CPU structure of the calling thread.5570 * @param pFpuCtx The FPU context.5571 * @param pResult The result to store.5572 * @param iStReg Which FPU register to store it in.5573 */5574 static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT5575 {5576 Assert(iStReg < 8);5577 uint16_t fNewFsw = pFpuCtx->FSW;5578 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;5579 fNewFsw &= ~X86_FSW_C_MASK;5580 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5581 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5582 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5583 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5584 pFpuCtx->FSW = fNewFsw;5585 pFpuCtx->FTW |= RT_BIT(iReg);5586 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;5587 RT_NOREF(pVCpu);5588 }5589 5590 5591 /**5592 * Only updates the FPU status word (FSW) with the result of the current5593 * instruction.5594 *5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.5596 * @param pFpuCtx The FPU context.5597 * @param u16FSW The FSW output of the current instruction.5598 */5599 static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT5600 {5601 uint16_t fNewFsw = pFpuCtx->FSW;5602 fNewFsw &= ~X86_FSW_C_MASK;5603 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;5604 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5605 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5606 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5607 pFpuCtx->FSW = fNewFsw;5608 RT_NOREF(pVCpu);5609 }5610 5611 5612 /**5613 * Pops one item off the FPU stack if no pending exception prevents it.5614 *5615 * @param pFpuCtx The FPU context.5616 */5617 static void iemFpuMaybePopOne(PX86FXSTATE 
pFpuCtx) RT_NOEXCEPT5618 {5619 /* Check pending exceptions. */5620 uint16_t uFSW = pFpuCtx->FSW;5621 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5622 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5623 return;5624 5625 /* TOP--. */5626 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;5627 uFSW &= ~X86_FSW_TOP_MASK;5628 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;5629 pFpuCtx->FSW = uFSW;5630 5631 /* Mark the previous ST0 as empty. */5632 iOldTop >>= X86_FSW_TOP_SHIFT;5633 pFpuCtx->FTW &= ~RT_BIT(iOldTop);5634 5635 /* Rotate the registers. */5636 iemFpuRotateStackPop(pFpuCtx);5637 }5638 5639 5640 /**5641 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.5642 *5643 * @param pVCpu The cross context virtual CPU structure of the calling thread.5644 * @param pResult The FPU operation result to push.5645 * @param uFpuOpcode The FPU opcode value.5646 */5647 void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5648 {5649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5650 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5651 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5652 }5653 5654 5655 /**5656 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,5657 * and sets FPUDP and FPUDS.5658 *5659 * @param pVCpu The cross context virtual CPU structure of the calling thread.5660 * @param pResult The FPU operation result to push.5661 * @param iEffSeg The effective segment register.5662 * @param GCPtrEff The effective address relative to @a iEffSeg.5663 * @param uFpuOpcode The FPU opcode value.5664 */5665 void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,5666 uint16_t uFpuOpcode) RT_NOEXCEPT5667 {5668 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5669 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5670 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5671 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5672 }5673 5674 5675 /**5676 * Replace ST0 with the first value and push the second onto the FPU stack,5677 * unless a pending exception prevents it.5678 *5679 * @param pVCpu The cross context virtual CPU structure of the calling thread.5680 * @param pResult The FPU operation result to store and push.5681 * @param uFpuOpcode The FPU opcode value.5682 */5683 void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5684 {5685 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5686 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5687 5688 /* Update FSW and bail if there are pending exceptions afterwards. */5689 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5690 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5691 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5692 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5693 {5694 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5695 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",5696 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5697 pFpuCtx->FSW = fFsw;5698 return;5699 }5700 5701 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5702 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5703 {5704 /* All is fine, push the actual value. 
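           Note that the registers are kept in fxsave/stack order: r80Result1 replaces
           the old ST0 in aRegs[0] and r80Result2 goes into aRegs[7], which the TOP
           decrement and the rotation below turn into the new ST0 -- the two-value
           pattern needed by instructions such as FPTAN and FSINCOS.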
*/5705 pFpuCtx->FTW |= RT_BIT(iNewTop);5706 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;5707 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;5708 }5709 else if (pFpuCtx->FCW & X86_FCW_IM)5710 {5711 /* Masked stack overflow, push QNaN. */5712 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5713 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);5714 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5715 }5716 else5717 {5718 /* Raise stack overflow, don't push anything. */5719 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5720 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5721 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",5722 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5723 return;5724 }5725 5726 fFsw &= ~X86_FSW_TOP_MASK;5727 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5728 pFpuCtx->FSW = fFsw;5729 5730 iemFpuRotateStackPush(pFpuCtx);5731 }5732 5733 5734 /**5735 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5736 * FOP.5737 *5738 * @param pVCpu The cross context virtual CPU structure of the calling thread.5739 * @param pResult The result to store.5740 * @param iStReg Which FPU register to store it in.5741 * @param uFpuOpcode The FPU opcode value.5742 */5743 void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5744 {5745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5746 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5747 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5748 }5749 5750 5751 /**5752 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5753 * FOP, and then pops the stack.5754 *5755 * @param pVCpu The cross context virtual CPU structure of the calling thread.5756 * @param pResult The result to store.5757 * @param iStReg Which FPU register to store it in.5758 * @param uFpuOpcode The FPU opcode value.5759 */5760 void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5761 {5762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5763 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5764 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5765 iemFpuMaybePopOne(pFpuCtx);5766 }5767 5768 5769 /**5770 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5771 * FPUDP, and FPUDS.5772 *5773 * @param pVCpu The cross context virtual CPU structure of the calling thread.5774 * @param pResult The result to store.5775 * @param iStReg Which FPU register to store it in.5776 * @param iEffSeg The effective memory operand selector register.5777 * @param GCPtrEff The effective memory operand offset.5778 * @param uFpuOpcode The FPU opcode value.5779 */5780 void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,5781 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5782 {5783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5784 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5785 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5786 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5787 }5788 5789 5790 /**5791 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5792 * FPUDP, and FPUDS, and then pops the stack.5793 *5794 * @param pVCpu The cross context virtual CPU structure of the calling thread.5795 * @param pResult The result to store.5796 * @param iStReg Which FPU register to store it in.5797 * @param iEffSeg The effective 
memory operand selector register.5798 * @param GCPtrEff The effective memory operand offset.5799 * @param uFpuOpcode The FPU opcode value.5800 */5801 void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,5802 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5803 {5804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5805 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5806 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5807 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5808 iemFpuMaybePopOne(pFpuCtx);5809 }5810 5811 5812 /**5813 * Updates the FOP, FPUIP, and FPUCS. For FNOP.5814 *5815 * @param pVCpu The cross context virtual CPU structure of the calling thread.5816 * @param uFpuOpcode The FPU opcode value.5817 */5818 void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5819 {5820 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5821 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5822 }5823 5824 5825 /**5826 * Updates the FSW, FOP, FPUIP, and FPUCS.5827 *5828 * @param pVCpu The cross context virtual CPU structure of the calling thread.5829 * @param u16FSW The FSW from the current instruction.5830 * @param uFpuOpcode The FPU opcode value.5831 */5832 void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5833 {5834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5835 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5836 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5837 }5838 5839 5840 /**5841 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.5842 *5843 * @param pVCpu The cross context virtual CPU structure of the calling thread.5844 * @param u16FSW The FSW from the current instruction.5845 * @param uFpuOpcode The FPU opcode value.5846 */5847 void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5848 {5849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5850 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5851 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5852 iemFpuMaybePopOne(pFpuCtx);5853 }5854 5855 5856 /**5857 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.5858 *5859 * @param pVCpu The cross context virtual CPU structure of the calling thread.5860 * @param u16FSW The FSW from the current instruction.5861 * @param iEffSeg The effective memory operand selector register.5862 * @param GCPtrEff The effective memory operand offset.5863 * @param uFpuOpcode The FPU opcode value.5864 */5865 void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5866 {5867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5869 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5870 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5871 }5872 5873 5874 /**5875 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.5876 *5877 * @param pVCpu The cross context virtual CPU structure of the calling thread.5878 * @param u16FSW The FSW from the current instruction.5879 * @param uFpuOpcode The FPU opcode value.5880 */5881 void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5882 {5883 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5884 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5885 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5886 iemFpuMaybePopOne(pFpuCtx);5887 
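    /* And the second pop, for the double-pop instructions such as FCOMPP and FUCOMPP. */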
iemFpuMaybePopOne(pFpuCtx);5888 }5889 5890 5891 /**5892 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.5893 *5894 * @param pVCpu The cross context virtual CPU structure of the calling thread.5895 * @param u16FSW The FSW from the current instruction.5896 * @param iEffSeg The effective memory operand selector register.5897 * @param GCPtrEff The effective memory operand offset.5898 * @param uFpuOpcode The FPU opcode value.5899 */5900 void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,5901 uint16_t uFpuOpcode) RT_NOEXCEPT5902 {5903 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5904 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5905 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5906 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5907 iemFpuMaybePopOne(pFpuCtx);5908 }5909 5910 5911 /**5912 * Worker routine for raising an FPU stack underflow exception.5913 *5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.5915 * @param pFpuCtx The FPU context.5916 * @param iStReg The stack register being accessed.5917 */5918 static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)5919 {5920 Assert(iStReg < 8 || iStReg == UINT8_MAX);5921 if (pFpuCtx->FCW & X86_FCW_IM)5922 {5923 /* Masked underflow. */5924 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5925 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;5926 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;5927 if (iStReg != UINT8_MAX)5928 {5929 pFpuCtx->FTW |= RT_BIT(iReg);5930 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);5931 }5932 }5933 else5934 {5935 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5936 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;5937 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",5938 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5939 }5940 RT_NOREF(pVCpu);5941 }5942 5943 5944 /**5945 * Raises a FPU stack underflow exception.5946 *5947 * @param pVCpu The cross context virtual CPU structure of the calling thread.5948 * @param iStReg The destination register that should be loaded5949 * with QNaN if \#IS is not masked. 
Specify5950 * UINT8_MAX if none (like for fcom).5951 * @param uFpuOpcode The FPU opcode value.5952 */5953 void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5954 {5955 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5956 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5957 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5958 }5959 5960 5961 void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5962 {5963 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5964 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5965 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5966 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5967 }5968 5969 5970 void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5971 {5972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5973 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5974 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5975 iemFpuMaybePopOne(pFpuCtx);5976 }5977 5978 5979 void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,5980 uint16_t uFpuOpcode) RT_NOEXCEPT5981 {5982 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5983 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5984 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5985 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5986 iemFpuMaybePopOne(pFpuCtx);5987 }5988 5989 5990 void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5991 {5992 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5993 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5994 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);5995 iemFpuMaybePopOne(pFpuCtx);5996 iemFpuMaybePopOne(pFpuCtx);5997 }5998 5999 6000 void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6001 {6002 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6003 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6004 6005 if (pFpuCtx->FCW & X86_FCW_IM)6006 {6007 /* Masked overflow - Push QNaN. */6008 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6009 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6010 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6011 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6012 pFpuCtx->FTW |= RT_BIT(iNewTop);6013 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6014 iemFpuRotateStackPush(pFpuCtx);6015 }6016 else6017 {6018 /* Exception pending - don't change TOP or the register stack. */6019 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6020 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6021 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",6022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6023 }6024 }6025 6026 6027 void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6028 {6029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6030 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6031 6032 if (pFpuCtx->FCW & X86_FCW_IM)6033 {6034 /* Masked overflow - Push QNaN. 
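           The TOP field is decremented modulo 8 by adding 7 (e.g. TOP=0 becomes 7,
           TOP=3 becomes 2) before QNaNs are stored, so both ST0 and ST1 read as
           QNaN afterwards.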
*/6035 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6036 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6037 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6038 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6039 pFpuCtx->FTW |= RT_BIT(iNewTop);6040 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);6041 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6042 iemFpuRotateStackPush(pFpuCtx);6043 }6044 else6045 {6046 /* Exception pending - don't change TOP or the register stack. */6047 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6048 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6049 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",6050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6051 }6052 }6053 6054 6055 /**6056 * Worker routine for raising an FPU stack overflow exception on a push.6057 *6058 * @param pVCpu The cross context virtual CPU structure of the calling thread.6059 * @param pFpuCtx The FPU context.6060 */6061 static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT6062 {6063 if (pFpuCtx->FCW & X86_FCW_IM)6064 {6065 /* Masked overflow. */6066 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6067 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6068 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;6069 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6070 pFpuCtx->FTW |= RT_BIT(iNewTop);6071 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6072 iemFpuRotateStackPush(pFpuCtx);6073 }6074 else6075 {6076 /* Exception pending - don't change TOP or the register stack. */6077 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6078 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6079 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",6080 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6081 }6082 RT_NOREF(pVCpu);6083 }6084 6085 6086 /**6087 * Raises a FPU stack overflow exception on a push.6088 *6089 * @param pVCpu The cross context virtual CPU structure of the calling thread.6090 * @param uFpuOpcode The FPU opcode value.6091 */6092 void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6093 {6094 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6095 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6096 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6097 }6098 6099 6100 /**6101 * Raises a FPU stack overflow exception on a push with a memory operand.6102 *6103 * @param pVCpu The cross context virtual CPU structure of the calling thread.6104 * @param iEffSeg The effective memory operand selector register.6105 * @param GCPtrEff The effective memory operand offset.6106 * @param uFpuOpcode The FPU opcode value.6107 */6108 void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT6109 {6110 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6111 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);6112 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6113 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6114 }6115 6116 /** @} */6117 6118 6119 /** @name Memory access.6120 *6121 * @{6122 */6123 6124 #undef LOG_GROUP6125 #define LOG_GROUP LOG_GROUP_IEM_MEM6126 6127 /**6128 * Applies the segment limit, base and attributes.6129 *6130 * This may raise a \#GP or \#SS.6131 *6132 * @returns VBox strict status code.6133 *6134 * @param pVCpu The cross context virtual CPU structure of the calling thread.6135 * @param fAccess The kind of access which 
is being performed.6136 * @param iSegReg The index of the segment register to apply.6137 * This is UINT8_MAX if none (for IDT, GDT, LDT,6138 * TSS, ++).6139 * @param cbMem The access size.6140 * @param pGCPtrMem Pointer to the guest memory address to apply6141 * segmentation to. Input and output parameter.6142 */6143 VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT6144 {6145 if (iSegReg == UINT8_MAX)6146 return VINF_SUCCESS;6147 6148 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));6149 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);6150 switch (IEM_GET_CPU_MODE(pVCpu))6151 {6152 case IEMMODE_16BIT:6153 case IEMMODE_32BIT:6154 {6155 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;6156 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;6157 6158 if ( pSel->Attr.n.u1Present6159 && !pSel->Attr.n.u1Unusable)6160 {6161 Assert(pSel->Attr.n.u1DescType);6162 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))6163 {6164 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)6165 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )6166 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6167 6168 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6169 {6170 /** @todo CPL check. */6171 }6172 6173 /*6174 * There are two kinds of data selectors, normal and expand down.6175 */6176 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))6177 {6178 if ( GCPtrFirst32 > pSel->u32Limit6179 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */6180 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6181 }6182 else6183 {6184 /*6185 * The upper boundary is defined by the B bit, not the G bit!6186 */6187 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)6188 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))6189 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6190 }6191 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6192 }6193 else6194 {6195 /*6196 * Code selector and usually be used to read thru, writing is6197 * only permitted in real and V8086 mode.6198 */6199 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)6200 || ( (fAccess & IEM_ACCESS_TYPE_READ)6201 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )6202 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )6203 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6204 6205 if ( GCPtrFirst32 > pSel->u32Limit6206 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */6207 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6208 6209 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6210 {6211 /** @todo CPL check. */6212 }6213 6214 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6215 }6216 }6217 else6218 return iemRaiseGeneralProtectionFault0(pVCpu);6219 return VINF_SUCCESS;6220 }6221 6222 case IEMMODE_64BIT:6223 {6224 RTGCPTR GCPtrMem = *pGCPtrMem;6225 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)6226 *pGCPtrMem = GCPtrMem + pSel->u64Base;6227 6228 Assert(cbMem >= 1);6229 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))6230 return VINF_SUCCESS;6231 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.6232 * 4.12.2 "Data Limit Checks in 64-bit Mode". 
*/6233 return iemRaiseGeneralProtectionFault0(pVCpu);6234 }6235 6236 default:6237 AssertFailedReturn(VERR_IEM_IPE_7);6238 }6239 }6240 6241 6242 /**6243 * Translates a virtual address to a physical physical address and checks if we6244 * can access the page as specified.6245 *6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.6247 * @param GCPtrMem The virtual address.6248 * @param cbAccess The access size, for raising \#PF correctly for6249 * FXSAVE and such.6250 * @param fAccess The intended access.6251 * @param pGCPhysMem Where to return the physical address.6252 */6253 VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,6254 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT6255 {6256 /** @todo Need a different PGM interface here. We're currently using6257 * generic / REM interfaces. this won't cut it for R0. */6258 /** @todo If/when PGM handles paged real-mode, we can remove the hack in6259 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault6260 * here. */6261 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));6262 PGMPTWALKFAST WalkFast;6263 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);6264 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);6265 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);6266 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);6267 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))6268 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);6269 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))6270 fQPage |= PGMQPAGE_F_USER_MODE;6271 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);6272 if (RT_SUCCESS(rc))6273 {6274 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);6275 6276 /* If the page is writable and does not have the no-exec bit set, all6277 access is allowed. Otherwise we'll have to check more carefully... */6278 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)6279 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)6280 || (WalkFast.fEffective & X86_PTE_RW)6281 || ( ( IEM_GET_CPL(pVCpu) != 36282 || (fAccess & IEM_ACCESS_WHAT_SYS))6283 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )6284 && ( (WalkFast.fEffective & X86_PTE_US)6285 || IEM_GET_CPL(pVCpu) != 36286 || (fAccess & IEM_ACCESS_WHAT_SYS) )6287 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)6288 || !(WalkFast.fEffective & X86_PTE_PAE_NX)6289 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )6290 )6291 );6292 6293 /* PGMGstQueryPageFast sets the A & D bits. */6294 /** @todo testcase: check when A and D bits are actually set by the CPU. */6295 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));6296 6297 *pGCPhysMem = WalkFast.GCPhys;6298 return VINF_SUCCESS;6299 }6300 6301 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));6302 /** @todo Check unassigned memory in unpaged mode. 
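[Annotation, not part of the changeset] iemMemPageTranslateAndCheckAccess builds the PGMGstQueryPageFast flags with the slightly cryptic term ((cr0 & X86_CR0_WP) ^ X86_CR0_WP). A small standalone illustration: the XOR yields a set bit exactly when CR0.WP is clear, which is what the AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0) relies on. CR0.WP is taken to be bit 16 here.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint32_t const fCr0Wp  = UINT32_C(0x00010000);          /* X86_CR0_WP */
        uint32_t const aCr0[]  = { 0, fCr0Wp };
        for (uint32_t cr0 : aCr0)
        {
            uint32_t const fWp0Flag = (cr0 & fCr0Wp) ^ fCr0Wp;  /* == PGMQPAGE_F_CR0_WP0 exactly when WP=0 */
            std::printf("CR0.WP=%d -> WP0 query flag %s\n", !!(cr0 & fCr0Wp), fWp0Flag ? "set" : "clear");
        }
        return 0;
    }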
*/6303 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT6304 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)6305 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);6306 #endif6307 *pGCPhysMem = NIL_RTGCPHYS;6308 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);6309 }6310 6311 #if 0 /*unused*/6312 /**6313 * Looks up a memory mapping entry.6314 *6315 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).6316 * @param pVCpu The cross context virtual CPU structure of the calling thread.6317 * @param pvMem The memory address.6318 * @param fAccess The access to.6319 */6320 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)6321 {6322 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6323 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;6324 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem6325 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6326 return 0;6327 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem6328 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6329 return 1;6330 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem6331 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6332 return 2;6333 return VERR_NOT_FOUND;6334 }6335 #endif6336 6337 /**6338 * Finds a free memmap entry when using iNextMapping doesn't work.6339 *6340 * @returns Memory mapping index, 1024 on failure.6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.6342 */6343 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)6344 {6345 /*6346 * The easy case.6347 */6348 if (pVCpu->iem.s.cActiveMappings == 0)6349 {6350 pVCpu->iem.s.iNextMapping = 1;6351 return 0;6352 }6353 6354 /* There should be enough mappings for all instructions. 
*/6355 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);6356 6357 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)6358 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)6359 return i;6360 6361 AssertFailedReturn(1024);6362 }6363 6364 6365 /**6366 * Commits a bounce buffer that needs writing back and unmaps it.6367 *6368 * @returns Strict VBox status code.6369 * @param pVCpu The cross context virtual CPU structure of the calling thread.6370 * @param iMemMap The index of the buffer to commit.6371 * @param fPostponeFail Whether we can postpone writer failures to ring-3.6372 * Always false in ring-3, obviously.6373 */6374 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)6375 {6376 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);6377 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);6378 #ifdef IN_RING36379 Assert(!fPostponeFail);6380 RT_NOREF_PV(fPostponeFail);6381 #endif6382 6383 /*6384 * Do the writing.6385 */6386 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6387 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)6388 {6389 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;6390 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6391 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6392 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6393 {6394 /*6395 * Carefully and efficiently dealing with access handler return6396 * codes make this a little bloated.6397 */6398 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,6399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6400 pbBuf,6401 cbFirst,6402 PGMACCESSORIGIN_IEM);6403 if (rcStrict == VINF_SUCCESS)6404 {6405 if (cbSecond)6406 {6407 rcStrict = PGMPhysWrite(pVM,6408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6409 pbBuf + cbFirst,6410 cbSecond,6411 PGMACCESSORIGIN_IEM);6412 if (rcStrict == VINF_SUCCESS)6413 { /* nothing */ }6414 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6415 {6416 LogEx(LOG_GROUP_IEM,6417 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",6418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6420 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6421 }6422 #ifndef IN_RING36423 else if (fPostponeFail)6424 {6425 LogEx(LOG_GROUP_IEM,6426 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6429 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6430 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6431 return iemSetPassUpStatus(pVCpu, rcStrict);6432 }6433 #endif6434 else6435 {6436 LogEx(LOG_GROUP_IEM,6437 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6440 return rcStrict;6441 }6442 }6443 }6444 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6445 {6446 if (!cbSecond)6447 {6448 LogEx(LOG_GROUP_IEM,6449 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",6450 
pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));6451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6452 }6453 else6454 {6455 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,6456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6457 pbBuf + cbFirst,6458 cbSecond,6459 PGMACCESSORIGIN_IEM);6460 if (rcStrict2 == VINF_SUCCESS)6461 {6462 LogEx(LOG_GROUP_IEM,6463 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",6464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6466 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6467 }6468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6469 {6470 LogEx(LOG_GROUP_IEM,6471 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",6472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6474 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6476 }6477 #ifndef IN_RING36478 else if (fPostponeFail)6479 {6480 LogEx(LOG_GROUP_IEM,6481 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6485 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6486 return iemSetPassUpStatus(pVCpu, rcStrict);6487 }6488 #endif6489 else6490 {6491 LogEx(LOG_GROUP_IEM,6492 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6495 return rcStrict2;6496 }6497 }6498 }6499 #ifndef IN_RING36500 else if (fPostponeFail)6501 {6502 LogEx(LOG_GROUP_IEM,6503 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6506 if (!cbSecond)6507 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;6508 else6509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;6510 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6511 return iemSetPassUpStatus(pVCpu, rcStrict);6512 }6513 #endif6514 else6515 {6516 LogEx(LOG_GROUP_IEM,6517 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6520 return rcStrict;6521 }6522 }6523 else6524 {6525 /*6526 * No access handlers, much simpler.6527 */6528 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);6529 if (RT_SUCCESS(rc))6530 {6531 if (cbSecond)6532 {6533 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);6534 if (RT_SUCCESS(rc))6535 { /* likely 
*/ }6536 else6537 {6538 LogEx(LOG_GROUP_IEM,6539 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6540 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));6542 return rc;6543 }6544 }6545 }6546 else6547 {6548 LogEx(LOG_GROUP_IEM,6549 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,6551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6552 return rc;6553 }6554 }6555 }6556 6557 #if defined(IEM_LOG_MEMORY_WRITES)6558 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6559 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));6560 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)6561 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6562 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),6563 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));6564 6565 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6566 g_cbIemWrote = cbWrote;6567 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));6568 #endif6569 6570 /*6571 * Free the mapping entry.6572 */6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;6574 Assert(pVCpu->iem.s.cActiveMappings != 0);6575 pVCpu->iem.s.cActiveMappings--;6576 return VINF_SUCCESS;6577 }6578 6579 6580 /**6581 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.6582 */6583 DECL_FORCE_INLINE(uint32_t)6584 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)6585 {6586 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;6587 if (fAccess & IEM_ACCESS_TYPE_WRITE)6588 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6589 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6590 }6591 6592 6593 /**6594 * iemMemMap worker that deals with a request crossing pages.6595 */6596 static VBOXSTRICTRC6597 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,6598 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)6599 {6600 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);6601 Assert(cbMem <= GUEST_PAGE_SIZE);6602 6603 /*6604 * Do the address translations.6605 */6606 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);6607 RTGCPHYS GCPhysFirst;6608 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);6609 if (rcStrict != VINF_SUCCESS)6610 return rcStrict;6611 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));6612 6613 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;6614 RTGCPHYS GCPhysSecond;6615 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6616 cbSecondPage, fAccess, &GCPhysSecond);6617 if (rcStrict != VINF_SUCCESS)6618 return rcStrict;6619 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);6620 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? 
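[Annotation, not part of the changeset] iemMemBounceBufferMapCrossPage splits the request with cbFirstPage = GUEST_PAGE_SIZE - (GCPtrFirst & GUEST_PAGE_OFFSET_MASK) and translates the second half at the rounded-down address of the last byte. A worked standalone sketch of that arithmetic; the 4 KiB page size and the addresses are only example values.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t const cbPage   = 4096;                           /* GUEST_PAGE_SIZE, assumed 4 KiB      */
        uint64_t const offMask  = cbPage - 1;                     /* GUEST_PAGE_OFFSET_MASK              */
        uint64_t const GCPtr    = UINT64_C(0x10ffa);              /* 6 bytes left on the first page      */
        uint64_t const cbMem    = 16;
        uint64_t const cbFirst  = cbPage - (GCPtr & offMask);     /* -> 6                                */
        uint64_t const cbSecond = cbMem - cbFirst;                /* -> 10                               */
        uint64_t const GCPtr2nd = (GCPtr + cbMem - 1) & ~offMask; /* -> 0x11000, start of second page    */
        std::printf("cbFirst=%llu cbSecond=%llu second page @ %#llx\n",
                    (unsigned long long)cbFirst, (unsigned long long)cbSecond, (unsigned long long)GCPtr2nd);
        return 0;
    }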
*/6621 6622 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6623 6624 /*6625 * Check for data breakpoints.6626 */6627 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))6628 { /* likely */ }6629 else6630 {6631 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);6632 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6633 cbSecondPage, fAccess);6634 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);6635 if (fDataBps > 1)6636 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",6637 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));6638 }6639 6640 /*6641 * Read in the current memory content if it's a read, execute or partial6642 * write access.6643 */6644 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6645 6646 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6647 {6648 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6649 {6650 /*6651 * Must carefully deal with access handler status codes here,6652 * makes the code a bit bloated.6653 */6654 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);6655 if (rcStrict == VINF_SUCCESS)6656 {6657 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6658 if (rcStrict == VINF_SUCCESS)6659 { /*likely */ }6660 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6661 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6662 else6663 {6664 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",6665 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));6666 return rcStrict;6667 }6668 }6669 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6670 {6671 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6672 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6673 {6674 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6675 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6676 }6677 else6678 {6679 LogEx(LOG_GROUP_IEM,6680 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",6681 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));6682 return rcStrict2;6683 }6684 }6685 else6686 {6687 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6688 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6689 return rcStrict;6690 }6691 }6692 else6693 {6694 /*6695 * No informational status codes here, much more straight forward.6696 */6697 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);6698 if (RT_SUCCESS(rc))6699 {6700 Assert(rc == VINF_SUCCESS);6701 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);6702 if (RT_SUCCESS(rc))6703 Assert(rc == VINF_SUCCESS);6704 else6705 {6706 LogEx(LOG_GROUP_IEM,6707 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));6708 return rc;6709 }6710 }6711 else6712 {6713 LogEx(LOG_GROUP_IEM,6714 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));6715 return rc;6716 }6717 }6718 }6719 #ifdef VBOX_STRICT6720 else6721 memset(pbBuf, 0xcc, cbMem);6722 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6723 memset(pbBuf + 
cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6724 #endif6725 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);6726 6727 /*6728 * Commit the bounce buffer entry.6729 */6730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;6732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;6733 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;6734 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;6735 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6736 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6737 pVCpu->iem.s.iNextMapping = iMemMap + 1;6738 pVCpu->iem.s.cActiveMappings++;6739 6740 *ppvMem = pbBuf;6741 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6742 return VINF_SUCCESS;6743 }6744 6745 6746 /**6747 * iemMemMap woker that deals with iemMemPageMap failures.6748 */6749 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,6750 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)6751 {6752 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);6753 6754 /*6755 * Filter out conditions we can handle and the ones which shouldn't happen.6756 */6757 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE6758 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL6759 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)6760 {6761 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);6762 return rcMap;6763 }6764 pVCpu->iem.s.cPotentialExits++;6765 6766 /*6767 * Read in the current memory content if it's a read, execute or partial6768 * write access.6769 */6770 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6771 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6772 {6773 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)6774 memset(pbBuf, 0xff, cbMem);6775 else6776 {6777 int rc;6778 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6779 {6780 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);6781 if (rcStrict == VINF_SUCCESS)6782 { /* nothing */ }6783 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6784 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6785 else6786 {6787 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6788 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6789 return rcStrict;6790 }6791 }6792 else6793 {6794 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);6795 if (RT_SUCCESS(rc))6796 { /* likely */ }6797 else6798 {6799 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6800 GCPhysFirst, rc));6801 return rc;6802 }6803 }6804 }6805 }6806 #ifdef VBOX_STRICT6807 else6808 memset(pbBuf, 0xcc, cbMem);6809 #endif6810 #ifdef VBOX_STRICT6811 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6812 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6813 #endif6814 6815 /*6816 * Commit the bounce buffer entry.6817 */6818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;6820 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;6821 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;6822 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;6823 
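[Annotation, not part of the changeset] iemMemBounceBufferMapPhys only accepts three rcMap values and passes everything else straight back. A purely illustrative summary of what each one means for the bounce path, phrased as a helper; the status codes are the real VBox ones (VMM headers assumed), the function itself is invented for this note and summarises the surrounding code.

    /* Hypothetical helper, for documentation of the bounce-buffer decision only. */
    static const char *iemExplainBounceReasonSketch(int rcMap)
    {
        switch (rcMap)
        {
            case VERR_PGM_PHYS_TLB_CATCH_WRITE: return "write handler on the page - writes are flushed via PGMPhysWrite at commit";
            case VERR_PGM_PHYS_TLB_CATCH_ALL:   return "all-access handler - reads and writes both routed through PGM";
            case VERR_PGM_PHYS_TLB_UNASSIGNED:  return "unassigned memory - reads yield 0xff bytes, fUnassigned makes the commit a no-op";
            default:                            return "not bounce buffered - the status is returned to the caller as-is";
        }
    }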
pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6824 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6825 pVCpu->iem.s.iNextMapping = iMemMap + 1;6826 pVCpu->iem.s.cActiveMappings++;6827 6828 *ppvMem = pbBuf;6829 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6830 return VINF_SUCCESS;6831 }6832 6833 6834 6835 /**6836 * Maps the specified guest memory for the given kind of access.6837 *6838 * This may be using bounce buffering of the memory if it's crossing a page6839 * boundary or if there is an access handler installed for any of it. Because6840 * of lock prefix guarantees, we're in for some extra clutter when this6841 * happens.6842 *6843 * This may raise a \#GP, \#SS, \#PF or \#AC.6844 *6845 * @returns VBox strict status code.6846 *6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.6848 * @param ppvMem Where to return the pointer to the mapped memory.6849 * @param pbUnmapInfo Where to return unmap info to be passed to6850 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when6851 * done.6852 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,6853 * 8, 12, 16, 32 or 512. When used by string operations6854 * it can be up to a page.6855 * @param iSegReg The index of the segment register to use for this6856 * access. The base and limits are checked. Use UINT8_MAX6857 * to indicate that no segmentation is required (for IDT,6858 * GDT and LDT accesses).6859 * @param GCPtrMem The address of the guest memory.6860 * @param fAccess How the memory is being accessed. The6861 * IEM_ACCESS_TYPE_XXX part is used to figure out how to6862 * map the memory, while the IEM_ACCESS_WHAT_XXX part is6863 * used when raising exceptions. The IEM_ACCESS_ATOMIC and6864 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be6865 * set.6866 * @param uAlignCtl Alignment control:6867 * - Bits 15:0 is the alignment mask.6868 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,6869 * IEM_MEMMAP_F_ALIGN_SSE, and6870 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.6871 * Pass zero to skip alignment.6872 */6873 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,6874 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT6875 {6876 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);6877 6878 /*6879 * Check the input and figure out which mapping entry to use.6880 */6881 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));6882 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 946883 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );6884 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));6885 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6886 6887 unsigned iMemMap = pVCpu->iem.s.iNextMapping;6888 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)6889 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)6890 {6891 iMemMap = iemMemMapFindFree(pVCpu);6892 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),6893 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,6894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,6895 pVCpu->iem.s.aMemMappings[2].fAccess),6896 VERR_IEM_IPE_9);6897 }6898 6899 /*6900 * Map the memory, checking that we can actually access it. 
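[Annotation, not part of the changeset] The uAlignCtl layout documented above (alignment mask in bits 15:0, IEM_MEMMAP_F_ALIGN_XXX policy flags in bits 31:16) composes like the fragment below. This is only an illustration of the encoding, not lifted from an actual caller, and it presupposes the IEM-internal headers for the flag names; with ALIGN_GP plus ALIGN_SSE a misaligned access raises #GP unless MXCSR.MM redirects it onto the #AC path.

    /* Hypothetical example of building an alignment-control word for a 16-byte operand. */
    uint32_t const uAlignCtlExample = (16 - 1)                /* bits 15:0: require 16-byte alignment     */
                                    | IEM_MEMMAP_F_ALIGN_GP   /* misalignment normally yields #GP ...      */
                                    | IEM_MEMMAP_F_ALIGN_SSE; /* ... unless MXCSR.MM selects the #AC route */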
If something6901 * slightly complicated happens, fall back on bounce buffering.6902 */6903 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);6904 if (rcStrict == VINF_SUCCESS)6905 { /* likely */ }6906 else6907 return rcStrict;6908 6909 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */6910 { /* likely */ }6911 else6912 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);6913 6914 /*6915 * Alignment check.6916 */6917 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )6918 { /* likelyish */ }6919 else6920 {6921 /* Misaligned access. */6922 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)6923 {6924 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)6925 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)6926 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )6927 {6928 AssertCompile(X86_CR0_AM == X86_EFL_AC);6929 6930 if (!iemMemAreAlignmentChecksEnabled(pVCpu))6931 { /* likely */ }6932 else6933 return iemRaiseAlignmentCheckException(pVCpu);6934 }6935 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)6936 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */6937 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU6938 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as6939 * that's what FXSAVE does on a 10980xe. */6940 && iemMemAreAlignmentChecksEnabled(pVCpu))6941 return iemRaiseAlignmentCheckException(pVCpu);6942 else6943 return iemRaiseGeneralProtectionFault0(pVCpu);6944 }6945 6946 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)6947 /* If the access is atomic there are host platform alignmnet restrictions6948 we need to conform with. */6949 if ( !(fAccess & IEM_ACCESS_ATOMIC)6950 # if defined(RT_ARCH_AMD64)6951 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */6952 # elif defined(RT_ARCH_ARM64)6953 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */6954 # else6955 # error port me6956 # endif6957 )6958 { /* okay */ }6959 else6960 {6961 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));6962 pVCpu->iem.s.cMisalignedAtomics += 1;6963 return VINF_EM_EMULATE_SPLIT_LOCK;6964 }6965 #endif6966 }6967 6968 #ifdef IEM_WITH_DATA_TLB6969 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));6970 6971 /*6972 * Get the TLB entry for this page and check PT flags.6973 *6974 * We reload the TLB entry if we need to set the dirty bit (accessed6975 * should in theory always be set).6976 */6977 uint8_t *pbMem = NULL;6978 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);6979 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);6980 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);6981 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)6982 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )6983 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)6984 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )6985 {6986 # ifdef IEM_WITH_TLB_STATISTICS6987 pVCpu->iem.s.DataTlb.cTlbCoreHits++;6988 # endif6989 6990 /* If the page is either supervisor only or non-writable, we need to do6991 more careful access checks. 
*/6992 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))6993 {6994 /* Write to read only memory? */6995 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)6996 && (fAccess & IEM_ACCESS_TYPE_WRITE)6997 && ( ( IEM_GET_CPL(pVCpu) == 36998 && !(fAccess & IEM_ACCESS_WHAT_SYS))6999 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))7000 {7001 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7002 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7003 }7004 7005 /* Kernel memory accessed by userland? */7006 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7007 && IEM_GET_CPL(pVCpu) == 37008 && !(fAccess & IEM_ACCESS_WHAT_SYS))7009 {7010 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7011 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7012 }7013 }7014 7015 /* Look up the physical page info if necessary. */7016 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7017 # ifdef IN_RING37018 pbMem = pTlbe->pbMappingR3;7019 # else7020 pbMem = NULL;7021 # endif7022 else7023 {7024 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))7025 { /* likely */ }7026 else7027 IEMTlbInvalidateAllPhysicalSlow(pVCpu);7028 pTlbe->pbMappingR3 = NULL;7029 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7030 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7031 &pbMem, &pTlbe->fFlagsAndPhysRev);7032 AssertRCReturn(rc, rc);7033 # ifdef IN_RING37034 pTlbe->pbMappingR3 = pbMem;7035 # endif7036 }7037 }7038 else7039 {7040 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7041 7042 /* This page table walking will set A bits as required by the access while performing the walk.7043 ASSUMES these are set when the address is translated rather than on commit... */7044 /** @todo testcase: check when A bits are actually set by the CPU for code. */7045 PGMPTWALKFAST WalkFast;7046 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7047 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7048 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7049 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7050 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7051 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7052 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7053 fQPage |= PGMQPAGE_F_USER_MODE;7054 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7055 if (RT_SUCCESS(rc))7056 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7057 else7058 {7059 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7060 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7061 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7062 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7063 # endif7064 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7065 }7066 7067 uint32_t fDataBps;7068 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7069 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7070 {7071 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7072 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. 
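[Annotation, not part of the changeset] The data TLB lookup above probes two adjacent entries per linear page: the even slot is tagged with uTlbRevision (non-global loads) and the odd slot with uTlbRevisionGlobal. A standalone sketch of the tag scheme, with invented names and an assumed bit split, showing why ORing a revision into the tag lets the whole TLB be invalidated by bumping a counter instead of clearing the array.

    #include <cstdint>

    struct TlbEntrySketch { uint64_t uTag; };

    /* mirrors: pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision) */
    static bool tlbHitSketch(TlbEntrySketch const &Entry, uint64_t uTagNoRev, uint64_t uRevision)
    {
        return Entry.uTag == (uTagNoRev | uRevision);
    }

    /* Advancing the revision makes every previously stored tag mismatch on the next probe.
       The shift is an assumption; the real code keeps the revision in the bits above the tag. */
    static void tlbFlushAllSketch(uint64_t &uRevision)
    {
        uRevision += UINT64_C(1) << 36;
    }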
*/7073 {7074 pTlbe--;7075 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7076 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7077 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7078 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7079 else7080 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7081 # endif7082 }7083 else7084 {7085 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7086 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7087 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7088 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7089 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7090 else7091 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7092 # endif7093 }7094 }7095 else7096 {7097 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7098 to the page with the data access breakpoint armed on it to pass thru here. */7099 if (fDataBps > 1)7100 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7101 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7102 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7103 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7104 pTlbe->uTag = uTagNoRev;7105 }7106 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7107 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7108 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7109 pTlbe->GCPhys = GCPhysPg;7110 pTlbe->pbMappingR3 = NULL;7111 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));7112 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));7113 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)7114 || !(fAccess & IEM_ACCESS_TYPE_WRITE)7115 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7116 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7117 || IEM_GET_CPL(pVCpu) != 37118 || (fAccess & IEM_ACCESS_WHAT_SYS));7119 7120 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7121 {7122 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7123 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7124 else7125 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7126 }7127 7128 /* Resolve the physical address. */7129 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7130 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7131 &pbMem, &pTlbe->fFlagsAndPhysRev);7132 AssertRCReturn(rc, rc);7133 # ifdef IN_RING37134 pTlbe->pbMappingR3 = pbMem;7135 # endif7136 }7137 7138 /*7139 * Check the physical page level access and mapping.7140 */7141 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))7142 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)7143 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )7144 { /* probably likely */ }7145 else7146 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,7147 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7148 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? 
VERR_PGM_PHYS_TLB_UNASSIGNED7149 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7150 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7151 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */7152 7153 if (pbMem)7154 {7155 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7156 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7157 fAccess |= IEM_ACCESS_NOT_LOCKED;7158 }7159 else7160 {7161 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7162 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7163 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7164 if (rcStrict != VINF_SUCCESS)7165 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7166 }7167 7168 void * const pvMem = pbMem;7169 7170 if (fAccess & IEM_ACCESS_TYPE_WRITE)7171 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7172 if (fAccess & IEM_ACCESS_TYPE_READ)7173 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7174 7175 #else /* !IEM_WITH_DATA_TLB */7176 7177 RTGCPHYS GCPhysFirst;7178 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7179 if (rcStrict != VINF_SUCCESS)7180 return rcStrict;7181 7182 if (fAccess & IEM_ACCESS_TYPE_WRITE)7183 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7184 if (fAccess & IEM_ACCESS_TYPE_READ)7185 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7186 7187 void *pvMem;7188 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7189 if (rcStrict != VINF_SUCCESS)7190 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7191 7192 #endif /* !IEM_WITH_DATA_TLB */7193 7194 /*7195 * Fill in the mapping table entry.7196 */7197 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7198 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7199 pVCpu->iem.s.iNextMapping = iMemMap + 1;7200 pVCpu->iem.s.cActiveMappings += 1;7201 7202 *ppvMem = pvMem;7203 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7204 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);7205 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);7206 7207 return VINF_SUCCESS;7208 }7209 7210 7211 /**7212 * Commits the guest memory if bounce buffered and unmaps it.7213 *7214 * @returns Strict VBox status code.7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.7216 * @param bUnmapInfo Unmap info set by iemMemMap.7217 */7218 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7219 {7220 uintptr_t const iMemMap = bUnmapInfo & 0x7;7221 AssertMsgReturn( (bUnmapInfo & 0x08)7222 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7223 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),7224 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7225 VERR_NOT_FOUND);7226 7227 /* If it's bounce buffered, we may need to write back the buffer. 
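[Annotation, not part of the changeset] Both mapping paths hand back the same one-byte cookie, *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4), and iemMemCommitAndUnmap decodes it again right above. A small standalone round-trip sketch; the access-type value 0x3 is just an example bit pattern.

    #include <cstdint>
    #include <cassert>

    static uint8_t encodeUnmapInfoSketch(unsigned iMemMap, uint32_t fAccessTypeBits)
    {
        return (uint8_t)(iMemMap | 0x08 | ((fAccessTypeBits & 0xf) << 4));
    }

    int main()
    {
        uint8_t const bUnmapInfo = encodeUnmapInfoSketch(2 /* mapping index */, 0x3 /* example IEM_ACCESS_TYPE_XXX bits */);
        assert((bUnmapInfo & 0x08) != 0);  /* validity marker, checked by the AssertMsgReturn above  */
        assert((bUnmapInfo & 0x07) == 2);  /* mapping index, recovered as bUnmapInfo & 0x7           */
        assert((bUnmapInfo >> 4)  == 0x3); /* access-type bits, compared against the stored fAccess  */
        return 0;
    }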
*/7228 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7229 {7230 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7231 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7232 }7233 /* Otherwise unlock it. */7234 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7235 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7236 7237 /* Free the entry. */7238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7239 Assert(pVCpu->iem.s.cActiveMappings != 0);7240 pVCpu->iem.s.cActiveMappings--;7241 return VINF_SUCCESS;7242 }7243 7244 7245 /**7246 * Rolls back the guest memory (conceptually only) and unmaps it.7247 *7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.7249 * @param bUnmapInfo Unmap info set by iemMemMap.7250 */7251 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7252 {7253 uintptr_t const iMemMap = bUnmapInfo & 0x7;7254 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7255 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7256 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7257 == ((unsigned)bUnmapInfo >> 4),7258 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7259 7260 /* Unlock it if necessary. */7261 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7262 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7263 7264 /* Free the entry. */7265 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7266 Assert(pVCpu->iem.s.cActiveMappings != 0);7267 pVCpu->iem.s.cActiveMappings--;7268 }7269 7270 #ifdef IEM_WITH_SETJMP7271 7272 /**7273 * Maps the specified guest memory for the given kind of access, longjmp on7274 * error.7275 *7276 * This may be using bounce buffering of the memory if it's crossing a page7277 * boundary or if there is an access handler installed for any of it. Because7278 * of lock prefix guarantees, we're in for some extra clutter when this7279 * happens.7280 *7281 * This may raise a \#GP, \#SS, \#PF or \#AC.7282 *7283 * @returns Pointer to the mapped memory.7284 *7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.7286 * @param bUnmapInfo Where to return unmap info to be passed to7287 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,7288 * iemMemCommitAndUnmapWoSafeJmp,7289 * iemMemCommitAndUnmapRoSafeJmp,7290 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap7291 * when done.7292 * @param cbMem The number of bytes to map. This is usually 1,7293 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by7294 * string operations it can be up to a page.7295 * @param iSegReg The index of the segment register to use for7296 * this access. The base and limits are checked.7297 * Use UINT8_MAX to indicate that no segmentation7298 * is required (for IDT, GDT and LDT accesses).7299 * @param GCPtrMem The address of the guest memory.7300 * @param fAccess How the memory is being accessed. The7301 * IEM_ACCESS_TYPE_XXX part is used to figure out how to7302 * map the memory, while the IEM_ACCESS_WHAT_XXX part is7303 * used when raising exceptions. 
The IEM_ACCESS_ATOMIC and7304 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be7305 * set.7306 * @param uAlignCtl Alignment control:7307 * - Bits 15:0 is the alignment mask.7308 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,7309 * IEM_MEMMAP_F_ALIGN_SSE, and7310 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.7311 * Pass zero to skip alignment.7312 * @tparam a_fSafe Whether this is a call from "safe" fallback function in7313 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that7314 * needs counting as such in the statistics.7315 */7316 template<bool a_fSafeCall = false>7317 static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7318 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7319 {7320 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);7321 7322 /*7323 * Check the input, check segment access and adjust address7324 * with segment base.7325 */7326 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */7327 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));7328 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));7329 7330 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);7331 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7332 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7333 7334 /*7335 * Alignment check.7336 */7337 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )7338 { /* likelyish */ }7339 else7340 {7341 /* Misaligned access. */7342 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7343 {7344 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)7345 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)7346 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )7347 {7348 AssertCompile(X86_CR0_AM == X86_EFL_AC);7349 7350 if (iemMemAreAlignmentChecksEnabled(pVCpu))7351 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7352 }7353 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)7354 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */7355 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU7356 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as7357 * that's what FXSAVE does on a 10980xe. */7358 && iemMemAreAlignmentChecksEnabled(pVCpu))7359 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7360 else7361 iemRaiseGeneralProtectionFault0Jmp(pVCpu);7362 }7363 7364 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)7365 /* If the access is atomic there are host platform alignmnet restrictions7366 we need to conform with. */7367 if ( !(fAccess & IEM_ACCESS_ATOMIC)7368 # if defined(RT_ARCH_AMD64)7369 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */7370 # elif defined(RT_ARCH_ARM64)7371 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. 
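[Annotation, not part of the changeset] The atomic-alignment guard above admits the access only when it fits in what is left of the current 64-byte cache line (16-byte block under ARM64/LSE2); otherwise iemMemMapJmp longjmps out with VINF_EM_EMULATE_SPLIT_LOCK. A standalone illustration of the AMD64 test:

    #include <cstdint>
    #include <cstdio>

    static bool fitsInCacheLineSketch(uint64_t GCPtrMem, uint64_t cbMem)
    {
        return 64u - (GCPtrMem & 63u) >= cbMem;    /* mirrors the RT_ARCH_AMD64 branch above */
    }

    int main()
    {
        std::printf("%d\n", fitsInCacheLineSketch(UINT64_C(0x1038), 8)); /* 8 bytes left in the line -> 1 (ok)          */
        std::printf("%d\n", fitsInCacheLineSketch(UINT64_C(0x103c), 8)); /* only 4 bytes left        -> 0 (split lock)  */
        return 0;
    }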
*/7372 # else7373 # error port me7374 # endif7375 )7376 { /* okay */ }7377 else7378 {7379 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));7380 pVCpu->iem.s.cMisalignedAtomics += 1;7381 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);7382 }7383 #endif7384 }7385 7386 /*7387 * Figure out which mapping entry to use.7388 */7389 unsigned iMemMap = pVCpu->iem.s.iNextMapping;7390 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7391 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)7392 {7393 iMemMap = iemMemMapFindFree(pVCpu);7394 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),7395 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,7396 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,7397 pVCpu->iem.s.aMemMappings[2].fAccess),7398 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));7399 }7400 7401 /*7402 * Crossing a page boundary?7403 */7404 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)7405 { /* No (likely). */ }7406 else7407 {7408 void *pvMem;7409 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);7410 if (rcStrict == VINF_SUCCESS)7411 return pvMem;7412 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7413 }7414 7415 #ifdef IEM_WITH_DATA_TLB7416 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));7417 7418 /*7419 * Get the TLB entry for this page checking that it has the A & D bits7420 * set as per fAccess flags.7421 */7422 /** @todo make the caller pass these in with fAccess. */7423 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 37424 ? IEMTLBE_F_PT_NO_USER : 0;7425 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE7426 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY7427 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)7428 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7429 ? IEMTLBE_F_PT_NO_WRITE : 0)7430 : 0;7431 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;7432 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);7433 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);7434 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);7435 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)7436 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )7437 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)7438 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )7439 {7440 # ifdef IEM_WITH_TLB_STATISTICS7441 if (a_fSafeCall)7442 pVCpu->iem.s.DataTlb.cTlbSafeHits++;7443 else7444 pVCpu->iem.s.DataTlb.cTlbCoreHits++;7445 # endif7446 }7447 else7448 {7449 if (a_fSafeCall)7450 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;7451 else7452 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7453 7454 /* This page table walking will set A and D bits as required by the7455 access while performing the walk.7456 ASSUMES these are set when the address is translated rather than on commit... */7457 /** @todo testcase: check when A and D bits are actually set by the CPU. 
*/7458 PGMPTWALKFAST WalkFast;7459 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7460 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7461 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7462 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7463 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7464 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7465 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7466 fQPage |= PGMQPAGE_F_USER_MODE;7467 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7468 if (RT_SUCCESS(rc))7469 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7470 else7471 {7472 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7473 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7474 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7475 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7476 # endif7477 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7478 }7479 7480 uint32_t fDataBps;7481 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7482 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7483 {7484 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7485 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */7486 {7487 pTlbe--;7488 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7489 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7490 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7491 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7492 else7493 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7494 # endif7495 }7496 else7497 {7498 if (a_fSafeCall)7499 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;7500 else7501 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7502 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7503 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7504 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7505 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7506 else7507 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7508 # endif7509 }7510 }7511 else7512 {7513 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7514 to the page with the data access breakpoint armed on it to pass thru here. 
*/7515 if (fDataBps > 1)7516 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7517 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7518 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7519 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7520 pTlbe->uTag = uTagNoRev;7521 }7522 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7523 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7524 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7525 pTlbe->GCPhys = GCPhysPg;7526 pTlbe->pbMappingR3 = NULL;7527 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7528 Assert( !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7529 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7530 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));7531 7532 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7533 {7534 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7535 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7536 else7537 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7538 }7539 7540 /* Resolve the physical address. */7541 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7542 uint8_t *pbMemFullLoad = NULL;7543 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7544 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);7545 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7546 # ifdef IN_RING37547 pTlbe->pbMappingR3 = pbMemFullLoad;7548 # endif7549 }7550 7551 /*7552 * Check the flags and physical revision.7553 * Note! This will revalidate the uTlbPhysRev after a full load. This is7554 * just to keep the code structure simple (i.e. avoid gotos or similar).7555 */7556 uint8_t *pbMem;7557 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))7558 == pVCpu->iem.s.DataTlb.uTlbPhysRev)7559 # ifdef IN_RING37560 pbMem = pTlbe->pbMappingR3;7561 # else7562 pbMem = NULL;7563 # endif7564 else7565 {7566 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7567 7568 /*7569 * Okay, something isn't quite right or needs refreshing.7570 */7571 /* Write to read only memory? */7572 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7573 {7574 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7575 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7576 /** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether7577 * to trigger an \#PG or a VM nested paging exit here yet! */7578 if (Walk.fFailed & PGM_WALKFAIL_EPT)7579 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7580 # endif7581 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7582 }7583 7584 /* Kernel memory accessed by userland? 
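[Annotation, not part of the changeset] The TLB entry above stores the page-table attributes inverted (~WalkFast.fEffective & (US | RW | D | A)), so a requirement such as "must be writable" becomes "the NO_WRITE bit must be clear". That is what lets the earlier compare fold the physical revision and all forbidden-bit checks into one mask-and-compare. A sketch with stand-in names and an assumed revision mask:

    #include <cstdint>

    /* "fForbidden" corresponds to fNoRead | fNoWriteNoDirty | fNoUser in the real code. */
    static bool tlbEntryUsableSketch(uint64_t fFlagsAndPhysRev, uint64_t fForbidden, uint64_t uTlbPhysRev,
                                     uint64_t fPhysRevMask /* IEMTLBE_F_PHYS_REV stand-in */)
    {
        /* Every "NO_xxx" bit that matters must be clear and the revision bits must match,
           which collapses into a single compare because the revision occupies its own bit range. */
        return (fFlagsAndPhysRev & (fForbidden | fPhysRevMask)) == uTlbPhysRev;
    }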
*/7585 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)7586 {7587 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7588 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7589 /** @todo TLB: See above. */7590 if (Walk.fFailed & PGM_WALKFAIL_EPT)7591 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7592 # endif7593 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7594 }7595 7596 /*7597 * Check if the physical page info needs updating.7598 */7599 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7600 # ifdef IN_RING37601 pbMem = pTlbe->pbMappingR3;7602 # else7603 pbMem = NULL;7604 # endif7605 else7606 {7607 pTlbe->pbMappingR3 = NULL;7608 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7609 pbMem = NULL;7610 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7611 &pbMem, &pTlbe->fFlagsAndPhysRev);7612 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7613 # ifdef IN_RING37614 pTlbe->pbMappingR3 = pbMem;7615 # endif7616 }7617 7618 /*7619 * Check the physical page level access and mapping.7620 */7621 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))7622 { /* probably likely */ }7623 else7624 {7625 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,7626 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7627 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED7628 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7629 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7630 if (rcStrict == VINF_SUCCESS)7631 return pbMem;7632 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7633 }7634 }7635 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. 
*/7636 7637 if (pbMem)7638 {7639 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7640 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7641 fAccess |= IEM_ACCESS_NOT_LOCKED;7642 }7643 else7644 {7645 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7646 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7647 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7648 if (rcStrict == VINF_SUCCESS)7649 {7650 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7651 return pbMem;7652 }7653 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7654 }7655 7656 void * const pvMem = pbMem;7657 7658 if (fAccess & IEM_ACCESS_TYPE_WRITE)7659 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7660 if (fAccess & IEM_ACCESS_TYPE_READ)7661 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7662 7663 #else /* !IEM_WITH_DATA_TLB */7664 7665 7666 RTGCPHYS GCPhysFirst;7667 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7668 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7669 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7670 7671 if (fAccess & IEM_ACCESS_TYPE_WRITE)7672 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7673 if (fAccess & IEM_ACCESS_TYPE_READ)7674 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7675 7676 void *pvMem;7677 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7678 if (rcStrict == VINF_SUCCESS)7679 { /* likely */ }7680 else7681 {7682 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7683 if (rcStrict == VINF_SUCCESS)7684 return pvMem;7685 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7686 }7687 7688 #endif /* !IEM_WITH_DATA_TLB */7689 7690 /*7691 * Fill in the mapping table entry.7692 */7693 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7694 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7695 pVCpu->iem.s.iNextMapping = iMemMap + 1;7696 pVCpu->iem.s.cActiveMappings++;7697 7698 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7699 return pvMem;7700 }7701 7702 7703 /** @see iemMemMapJmp */7704 static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7705 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7706 {7707 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);7708 }7709 7710 7711 /**7712 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.7713 *7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.7715 * @param pvMem The mapping.7716 * @param fAccess The kind of access.7717 */7718 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7719 {7720 uintptr_t const iMemMap = bUnmapInfo & 0x7;7721 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7722 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7723 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7724 == ((unsigned)bUnmapInfo >> 4),7725 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7726 7727 /* If it's bounce buffered, we may need to write back the buffer. 
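/*
 * [Illustrative sketch] The bUnmapInfo byte built above packs everything the
 * unmap side needs: the mapping-table index in bits 2:0, a validity marker in
 * bit 3, and the access-type bits in the high nibble, which is what the
 * AssertMsg in iemMemCommitAndUnmapJmp checks against fAccess.  Standalone
 * pack/unpack helpers (constants invented for illustration):
 */
#include <stdint.h>
#include <assert.h>

#define SK_ACCESS_TYPE_READ     0x1u
#define SK_ACCESS_TYPE_WRITE    0x2u

static uint8_t skPackUnmapInfo(unsigned iMapping, uint32_t fAccessType)
{
    assert(iMapping < 8 && fAccessType <= 0xf);
    return (uint8_t)(iMapping | 0x08 | (fAccessType << 4));
}

static void skUnpackUnmapInfo(uint8_t bUnmapInfo, unsigned *piMapping, uint32_t *pfAccessType)
{
    assert(bUnmapInfo & 0x08);                  /* the validity marker must be present */
    *piMapping    = bUnmapInfo & 0x7;
    *pfAccessType = (uint32_t)bUnmapInfo >> 4;
}

/* Example: slot 2 mapped read+write encodes as 0x3a and decodes back to (2, 0x3). */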
*/7728 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7729 {7730 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7731 {7732 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7733 if (rcStrict == VINF_SUCCESS)7734 return;7735 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7736 }7737 }7738 /* Otherwise unlock it. */7739 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7740 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7741 7742 /* Free the entry. */7743 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7744 Assert(pVCpu->iem.s.cActiveMappings != 0);7745 pVCpu->iem.s.cActiveMappings--;7746 }7747 7748 7749 /** Fallback for iemMemCommitAndUnmapRwJmp. */7750 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7751 {7752 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7753 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7754 }7755 7756 7757 /** Fallback for iemMemCommitAndUnmapAtJmp. */7758 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7759 {7760 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7761 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7762 }7763 7764 7765 /** Fallback for iemMemCommitAndUnmapWoJmp. */7766 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7767 {7768 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7769 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7770 }7771 7772 7773 /** Fallback for iemMemCommitAndUnmapRoJmp. */7774 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7775 {7776 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);7777 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7778 }7779 7780 7781 /** Fallback for iemMemRollbackAndUnmapWo. */7782 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7783 {7784 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7785 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);7786 }7787 7788 #endif /* IEM_WITH_SETJMP */7789 7790 #ifndef IN_RING37791 /**7792 * Commits the guest memory if bounce buffered and unmaps it, if any bounce7793 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).7794 *7795 * Allows the instruction to be completed and retired, while the IEM user will7796 * return to ring-3 immediately afterwards and do the postponed writes there.7797 *7798 * @returns VBox status code (no strict statuses). 
Caller must check7799 * VMCPU_FF_IEM before repeating string instructions and similar stuff.7800 * @param pVCpu The cross context virtual CPU structure of the calling thread.7801 * @param pvMem The mapping.7802 * @param fAccess The kind of access.7803 */7804 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7805 {7806 uintptr_t const iMemMap = bUnmapInfo & 0x7;7807 AssertMsgReturn( (bUnmapInfo & 0x08)7808 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7809 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7810 == ((unsigned)bUnmapInfo >> 4),7811 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7812 VERR_NOT_FOUND);7813 7814 /* If it's bounce buffered, we may need to write back the buffer. */7815 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7816 {7817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7818 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);7819 }7820 /* Otherwise unlock it. */7821 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7822 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7823 7824 /* Free the entry. */7825 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7826 Assert(pVCpu->iem.s.cActiveMappings != 0);7827 pVCpu->iem.s.cActiveMappings--;7828 return VINF_SUCCESS;7829 }7830 #endif7831 7832 7833 /**7834 * Rollbacks mappings, releasing page locks and such.7835 *7836 * The caller shall only call this after checking cActiveMappings.7837 *7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.7839 */7840 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT7841 {7842 Assert(pVCpu->iem.s.cActiveMappings > 0);7843 7844 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);7845 while (iMemMap-- > 0)7846 {7847 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;7848 if (fAccess != IEM_ACCESS_INVALID)7849 {7850 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));7851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7852 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))7853 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7854 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,7855 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",7856 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,7857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));7858 pVCpu->iem.s.cActiveMappings--;7859 }7860 }7861 }7862 7863 7864 /*7865 * Instantiate R/W templates.7866 */7867 #define TMPL_MEM_WITH_STACK7868 7869 #define TMPL_MEM_TYPE uint8_t7870 #define TMPL_MEM_FN_SUFF U87871 #define TMPL_MEM_FMT_TYPE "%#04x"7872 #define TMPL_MEM_FMT_DESC "byte"7873 #include "IEMAllMemRWTmpl.cpp.h"7874 7875 #define TMPL_MEM_TYPE uint16_t7876 #define TMPL_MEM_FN_SUFF U167877 #define TMPL_MEM_FMT_TYPE "%#06x"7878 #define TMPL_MEM_FMT_DESC "word"7879 #include "IEMAllMemRWTmpl.cpp.h"7880 7881 #define TMPL_WITH_PUSH_SREG7882 #define TMPL_MEM_TYPE uint32_t7883 #define TMPL_MEM_FN_SUFF U327884 #define TMPL_MEM_FMT_TYPE "%#010x"7885 #define TMPL_MEM_FMT_DESC "dword"7886 #include "IEMAllMemRWTmpl.cpp.h"7887 #undef TMPL_WITH_PUSH_SREG7888 7889 #define TMPL_MEM_TYPE uint64_t7890 #define TMPL_MEM_FN_SUFF U647891 #define 
TMPL_MEM_FMT_TYPE "%#018RX64"7892 #define TMPL_MEM_FMT_DESC "qword"7893 #include "IEMAllMemRWTmpl.cpp.h"7894 7895 #undef TMPL_MEM_WITH_STACK7896 7897 #define TMPL_MEM_TYPE uint32_t7898 #define TMPL_MEM_TYPE_ALIGN 07899 #define TMPL_MEM_FN_SUFF U32NoAc7900 #define TMPL_MEM_FMT_TYPE "%#010x"7901 #define TMPL_MEM_FMT_DESC "dword"7902 #include "IEMAllMemRWTmpl.cpp.h"7903 #undef TMPL_WITH_PUSH_SREG7904 7905 #define TMPL_MEM_TYPE uint64_t7906 #define TMPL_MEM_TYPE_ALIGN 07907 #define TMPL_MEM_FN_SUFF U64NoAc7908 #define TMPL_MEM_FMT_TYPE "%#018RX64"7909 #define TMPL_MEM_FMT_DESC "qword"7910 #include "IEMAllMemRWTmpl.cpp.h"7911 7912 #define TMPL_MEM_TYPE uint64_t7913 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)7914 #define TMPL_MEM_FN_SUFF U64AlignedU1287915 #define TMPL_MEM_FMT_TYPE "%#018RX64"7916 #define TMPL_MEM_FMT_DESC "qword"7917 #include "IEMAllMemRWTmpl.cpp.h"7918 7919 /* See IEMAllMemRWTmplInline.cpp.h */7920 #define TMPL_MEM_BY_REF7921 7922 #define TMPL_MEM_TYPE RTFLOAT80U7923 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)7924 #define TMPL_MEM_FN_SUFF R807925 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7926 #define TMPL_MEM_FMT_DESC "tword"7927 #include "IEMAllMemRWTmpl.cpp.h"7928 7929 #define TMPL_MEM_TYPE RTPBCD80U7930 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */7931 #define TMPL_MEM_FN_SUFF D807932 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7933 #define TMPL_MEM_FMT_DESC "tword"7934 #include "IEMAllMemRWTmpl.cpp.h"7935 7936 #define TMPL_MEM_TYPE RTUINT128U7937 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7938 #define TMPL_MEM_FN_SUFF U1287939 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7940 #define TMPL_MEM_FMT_DESC "dqword"7941 #include "IEMAllMemRWTmpl.cpp.h"7942 7943 #define TMPL_MEM_TYPE RTUINT128U7944 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7945 #define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)7946 #define TMPL_MEM_FN_SUFF U128AlignedSse7947 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7948 #define TMPL_MEM_FMT_DESC "dqword"7949 #include "IEMAllMemRWTmpl.cpp.h"7950 7951 #define TMPL_MEM_TYPE RTUINT128U7952 #define TMPL_MEM_TYPE_ALIGN 07953 #define TMPL_MEM_FN_SUFF U128NoAc7954 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7955 #define TMPL_MEM_FMT_DESC "dqword"7956 #include "IEMAllMemRWTmpl.cpp.h"7957 7958 #define TMPL_MEM_TYPE RTUINT256U7959 #define TMPL_MEM_TYPE_ALIGN 07960 #define TMPL_MEM_FN_SUFF U256NoAc7961 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7962 #define TMPL_MEM_FMT_DESC "qqword"7963 #include "IEMAllMemRWTmpl.cpp.h"7964 7965 #define TMPL_MEM_TYPE RTUINT256U7966 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)7967 #define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP7968 #define TMPL_MEM_FN_SUFF U256AlignedAvx7969 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7970 #define TMPL_MEM_FMT_DESC "qqword"7971 #include "IEMAllMemRWTmpl.cpp.h"7972 7973 /**7974 * Fetches a data dword and zero extends it to a qword.7975 *7976 * @returns Strict VBox status code.7977 * @param pVCpu The cross context virtual CPU structure of the calling thread.7978 * @param pu64Dst Where to return the qword.7979 * @param iSegReg The index of the segment register to use for7980 * this access. The base and limits are checked.7981 * @param GCPtrMem The address of the guest memory.7982 */7983 VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT7984 {7985 /* The lazy approach for now... 
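/*
 * [Illustrative sketch] The _ZX_ (zero-extending) and Sx (sign-extending)
 * dword fetch variants in this area differ only in the pointer type they
 * dereference: assigning a uint32_t to a uint64_t zero-extends, assigning an
 * int32_t sign-extends.  Tiny standalone demonstration:
 */
#include <stdint.h>
#include <assert.h>

static uint64_t skZeroExtendU32(uint32_t u32) { return u32; }
static uint64_t skSignExtendS32(uint32_t u32) { return (uint64_t)(int64_t)(int32_t)u32; }

static void skExtensionExample(void)
{
    assert(skZeroExtendU32(UINT32_C(0x80000000)) == UINT64_C(0x0000000080000000));
    assert(skSignExtendS32(UINT32_C(0x80000000)) == UINT64_C(0xffffffff80000000));
}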
*/7986 uint8_t bUnmapInfo;7987 uint32_t const *pu32Src;7988 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,7989 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);7990 if (rc == VINF_SUCCESS)7991 {7992 *pu64Dst = *pu32Src;7993 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);7994 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));7995 }7996 return rc;7997 }7998 7999 8000 #ifdef SOME_UNUSED_FUNCTION8001 /**8002 * Fetches a data dword and sign extends it to a qword.8003 *8004 * @returns Strict VBox status code.8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.8006 * @param pu64Dst Where to return the sign extended value.8007 * @param iSegReg The index of the segment register to use for8008 * this access. The base and limits are checked.8009 * @param GCPtrMem The address of the guest memory.8010 */8011 VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8012 {8013 /* The lazy approach for now... */8014 uint8_t bUnmapInfo;8015 int32_t const *pi32Src;8016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,8017 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);8018 if (rc == VINF_SUCCESS)8019 {8020 *pu64Dst = *pi32Src;8021 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8022 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));8023 }8024 #ifdef __GNUC__ /* warning: GCC may be a royal pain */8025 else8026 *pu64Dst = 0;8027 #endif8028 return rc;8029 }8030 #endif8031 8032 8033 /**8034 * Fetches a descriptor register (lgdt, lidt).8035 *8036 * @returns Strict VBox status code.8037 * @param pVCpu The cross context virtual CPU structure of the calling thread.8038 * @param pcbLimit Where to return the limit.8039 * @param pGCPtrBase Where to return the base.8040 * @param iSegReg The index of the segment register to use for8041 * this access. The base and limits are checked.8042 * @param GCPtrMem The address of the guest memory.8043 * @param enmOpSize The effective operand size.8044 */8045 VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,8046 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT8047 {8048 /*8049 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a8050 * little special:8051 * - The two reads are done separately.8052 * - Operand size override works in 16-bit and 32-bit code, but 64-bit.8053 * - We suspect the 386 to actually commit the limit before the base in8054 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). 
We8055 * don't try emulate this eccentric behavior, because it's not well8056 * enough understood and rather hard to trigger.8057 * - The 486 seems to do a dword limit read when the operand size is 32-bit.8058 */8059 VBOXSTRICTRC rcStrict;8060 if (IEM_IS_64BIT_CODE(pVCpu))8061 {8062 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8063 if (rcStrict == VINF_SUCCESS)8064 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);8065 }8066 else8067 {8068 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */8069 if (enmOpSize == IEMMODE_32BIT)8070 {8071 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)8072 {8073 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8074 if (rcStrict == VINF_SUCCESS)8075 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8076 }8077 else8078 {8079 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);8080 if (rcStrict == VINF_SUCCESS)8081 {8082 *pcbLimit = (uint16_t)uTmp;8083 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8084 }8085 }8086 if (rcStrict == VINF_SUCCESS)8087 *pGCPtrBase = uTmp;8088 }8089 else8090 {8091 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8092 if (rcStrict == VINF_SUCCESS)8093 {8094 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8095 if (rcStrict == VINF_SUCCESS)8096 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);8097 }8098 }8099 }8100 return rcStrict;8101 }8102 8103 8104 /**8105 * Stores a data dqword, SSE aligned.8106 *8107 * @returns Strict VBox status code.8108 * @param pVCpu The cross context virtual CPU structure of the calling thread.8109 * @param iSegReg The index of the segment register to use for8110 * this access. The base and limits are checked.8111 * @param GCPtrMem The address of the guest memory.8112 * @param u128Value The value to store.8113 */8114 VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT8115 {8116 /* The lazy approach for now... */8117 uint8_t bUnmapInfo;8118 PRTUINT128U pu128Dst;8119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8120 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8121 if (rc == VINF_SUCCESS)8122 {8123 pu128Dst->au64[0] = u128Value.au64[0];8124 pu128Dst->au64[1] = u128Value.au64[1];8125 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8126 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8127 }8128 return rc;8129 }8130 8131 8132 #ifdef IEM_WITH_SETJMP8133 /**8134 * Stores a data dqword, SSE aligned.8135 *8136 * @returns Strict VBox status code.8137 * @param pVCpu The cross context virtual CPU structure of the calling thread.8138 * @param iSegReg The index of the segment register to use for8139 * this access. The base and limits are checked.8140 * @param GCPtrMem The address of the guest memory.8141 * @param u128Value The value to store.8142 */8143 void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,8144 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP8145 {8146 /* The lazy approach for now... 
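/*
 * [Illustrative sketch] The pseudo-descriptor LGDT/LIDT reads is a 16-bit
 * limit followed by the base, and with a 16-bit operand size only 24 bits of
 * the base are used.  A standalone helper for the legacy-mode cases above,
 * assuming a little-endian host and leaving out the 64-bit path and the 486
 * dword-limit quirk:
 */
#include <stdint.h>
#include <string.h>

static void skFetchXdtr(uint8_t const *pbMem, int f32BitOpSize, uint16_t *pcbLimit, uint32_t *pBase)
{
    uint16_t uLimit;
    uint32_t uBase;
    memcpy(&uLimit, pbMem,     sizeof(uLimit));     /* bytes 0..1: limit */
    memcpy(&uBase,  pbMem + 2, sizeof(uBase));      /* bytes 2..5: base  */
    *pcbLimit = uLimit;
    *pBase    = f32BitOpSize ? uBase : (uBase & UINT32_C(0x00ffffff));  /* opsize16: 24-bit base */
}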
*/8147 uint8_t bUnmapInfo;8148 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8149 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8150 pu128Dst->au64[0] = u128Value.au64[0];8151 pu128Dst->au64[1] = u128Value.au64[1];8152 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8153 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8154 }8155 #endif8156 8157 8158 /**8159 * Stores a data dqword.8160 *8161 * @returns Strict VBox status code.8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.8163 * @param iSegReg The index of the segment register to use for8164 * this access. The base and limits are checked.8165 * @param GCPtrMem The address of the guest memory.8166 * @param pu256Value Pointer to the value to store.8167 */8168 VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT8169 {8170 /* The lazy approach for now... */8171 uint8_t bUnmapInfo;8172 PRTUINT256U pu256Dst;8173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8174 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8175 if (rc == VINF_SUCCESS)8176 {8177 pu256Dst->au64[0] = pu256Value->au64[0];8178 pu256Dst->au64[1] = pu256Value->au64[1];8179 pu256Dst->au64[2] = pu256Value->au64[2];8180 pu256Dst->au64[3] = pu256Value->au64[3];8181 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8182 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8183 }8184 return rc;8185 }8186 8187 8188 #ifdef IEM_WITH_SETJMP8189 /**8190 * Stores a data dqword, longjmp on error.8191 *8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.8193 * @param iSegReg The index of the segment register to use for8194 * this access. The base and limits are checked.8195 * @param GCPtrMem The address of the guest memory.8196 * @param pu256Value Pointer to the value to store.8197 */8198 void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP8199 {8200 /* The lazy approach for now... */8201 uint8_t bUnmapInfo;8202 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8203 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8204 pu256Dst->au64[0] = pu256Value->au64[0];8205 pu256Dst->au64[1] = pu256Value->au64[1];8206 pu256Dst->au64[2] = pu256Value->au64[2];8207 pu256Dst->au64[3] = pu256Value->au64[3];8208 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8209 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8210 }8211 #endif8212 8213 8214 /**8215 * Stores a descriptor register (sgdt, sidt).8216 *8217 * @returns Strict VBox status code.8218 * @param pVCpu The cross context virtual CPU structure of the calling thread.8219 * @param cbLimit The limit.8220 * @param GCPtrBase The base address.8221 * @param iSegReg The index of the segment register to use for8222 * this access. The base and limits are checked.8223 * @param GCPtrMem The address of the guest memory.8224 */8225 VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8226 {8227 /*8228 * The SIDT and SGDT instructions actually stores the data using two8229 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). 
The instructions8230 * does not respond to opsize prefixes.8231 */8232 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);8233 if (rcStrict == VINF_SUCCESS)8234 {8235 if (IEM_IS_16BIT_CODE(pVCpu))8236 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,8237 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_2868238 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);8239 else if (IEM_IS_32BIT_CODE(pVCpu))8240 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);8241 else8242 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);8243 }8244 return rcStrict;8245 }8246 8247 8248 /**8249 * Begin a special stack push (used by interrupt, exceptions and such).8250 *8251 * This will raise \#SS or \#PF if appropriate.8252 *8253 * @returns Strict VBox status code.8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.8255 * @param cbMem The number of bytes to push onto the stack.8256 * @param cbAlign The alignment mask (7, 3, 1).8257 * @param ppvMem Where to return the pointer to the stack memory.8258 * As with the other memory functions this could be8259 * direct access or bounce buffered access, so8260 * don't commit register until the commit call8261 * succeeds.8262 * @param pbUnmapInfo Where to store unmap info for8263 * iemMemStackPushCommitSpecial.8264 * @param puNewRsp Where to return the new RSP value. This must be8265 * passed unchanged to8266 * iemMemStackPushCommitSpecial().8267 */8268 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,8269 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT8270 {8271 Assert(cbMem < UINT8_MAX);8272 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);8273 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);8274 }8275 8276 8277 /**8278 * Commits a special stack push (started by iemMemStackPushBeginSpecial).8279 *8280 * This will update the rSP.8281 *8282 * @returns Strict VBox status code.8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.8284 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.8285 * @param uNewRsp The new RSP value returned by8286 * iemMemStackPushBeginSpecial().8287 */8288 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT8289 {8290 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8291 if (rcStrict == VINF_SUCCESS)8292 pVCpu->cpum.GstCtx.rsp = uNewRsp;8293 return rcStrict;8294 }8295 8296 8297 /**8298 * Begin a special stack pop (used by iret, retf and such).8299 *8300 * This will raise \#SS or \#PF if appropriate.8301 *8302 * @returns Strict VBox status code.8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.8304 * @param cbMem The number of bytes to pop from the stack.8305 * @param cbAlign The alignment mask (7, 3, 1).8306 * @param ppvMem Where to return the pointer to the stack memory.8307 * @param pbUnmapInfo Where to store unmap info for8308 * iemMemStackPopDoneSpecial.8309 * @param puNewRsp Where to return the new RSP value. 
This must be8310 * assigned to CPUMCTX::rsp manually some time8311 * after iemMemStackPopDoneSpecial() has been8312 * called.8313 */8314 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,8315 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT8316 {8317 Assert(cbMem < UINT8_MAX);8318 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);8319 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);8320 }8321 8322 8323 /**8324 * Continue a special stack pop (used by iret and retf), for the purpose of8325 * retrieving a new stack pointer.8326 *8327 * This will raise \#SS or \#PF if appropriate.8328 *8329 * @returns Strict VBox status code.8330 * @param pVCpu The cross context virtual CPU structure of the calling thread.8331 * @param off Offset from the top of the stack. This is zero8332 * except in the retf case.8333 * @param cbMem The number of bytes to pop from the stack.8334 * @param ppvMem Where to return the pointer to the stack memory.8335 * @param pbUnmapInfo Where to store unmap info for8336 * iemMemStackPopDoneSpecial.8337 * @param uCurNewRsp The current uncommitted RSP value. (No need to8338 * return this because all use of this function is8339 * to retrieve a new value and anything we return8340 * here would be discarded.)8341 */8342 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,8343 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT8344 {8345 Assert(cbMem < UINT8_MAX);8346 8347 /* The essense of iemRegGetRspForPopEx and friends: */ /** @todo put this into a inlined function? */8348 RTGCPTR GCPtrTop;8349 if (IEM_IS_64BIT_CODE(pVCpu))8350 GCPtrTop = uCurNewRsp;8351 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)8352 GCPtrTop = (uint32_t)uCurNewRsp;8353 else8354 GCPtrTop = (uint16_t)uCurNewRsp;8355 8356 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,8357 0 /* checked in iemMemStackPopBeginSpecial */);8358 }8359 8360 8361 /**8362 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or8363 * iemMemStackPopContinueSpecial).8364 *8365 * The caller will manually commit the rSP.8366 *8367 * @returns Strict VBox status code.8368 * @param pVCpu The cross context virtual CPU structure of the calling thread.8369 * @param bUnmapInfo Unmap information returned by8370 * iemMemStackPopBeginSpecial() or8371 * iemMemStackPopContinueSpecial().8372 */8373 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT8374 {8375 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8376 }8377 8378 8379 /**8380 * Fetches a system table byte.8381 *8382 * @returns Strict VBox status code.8383 * @param pVCpu The cross context virtual CPU structure of the calling thread.8384 * @param pbDst Where to return the byte.8385 * @param iSegReg The index of the segment register to use for8386 * this access. The base and limits are checked.8387 * @param GCPtrMem The address of the guest memory.8388 */8389 VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8390 {8391 /* The lazy approach for now... 
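/*
 * [Illustrative sketch] The "essence of iemRegGetRspForPopEx" block above is
 * about how much of RSP forms the stack address: all 64 bits in long mode,
 * ESP when SS is a big (32-bit) segment, otherwise just SP.  Standalone model
 * with an invented mode enum:
 */
#include <stdint.h>

typedef enum SKSTACKMODE { SK_STACK_16, SK_STACK_32, SK_STACK_64 } SKSTACKMODE;

static uint64_t skStackAddrFromRsp(uint64_t uRsp, SKSTACKMODE enmMode)
{
    switch (enmMode)
    {
        case SK_STACK_64:   return uRsp;                /* long mode: full RSP */
        case SK_STACK_32:   return (uint32_t)uRsp;      /* SS.Attr.D=1: ESP */
        default:            return (uint16_t)uRsp;      /* 16-bit stack: SP */
    }
}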
*/8392 uint8_t bUnmapInfo;8393 uint8_t const *pbSrc;8394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8395 if (rc == VINF_SUCCESS)8396 {8397 *pbDst = *pbSrc;8398 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8399 }8400 return rc;8401 }8402 8403 8404 /**8405 * Fetches a system table word.8406 *8407 * @returns Strict VBox status code.8408 * @param pVCpu The cross context virtual CPU structure of the calling thread.8409 * @param pu16Dst Where to return the word.8410 * @param iSegReg The index of the segment register to use for8411 * this access. The base and limits are checked.8412 * @param GCPtrMem The address of the guest memory.8413 */8414 VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8415 {8416 /* The lazy approach for now... */8417 uint8_t bUnmapInfo;8418 uint16_t const *pu16Src;8419 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8420 if (rc == VINF_SUCCESS)8421 {8422 *pu16Dst = *pu16Src;8423 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8424 }8425 return rc;8426 }8427 8428 8429 /**8430 * Fetches a system table dword.8431 *8432 * @returns Strict VBox status code.8433 * @param pVCpu The cross context virtual CPU structure of the calling thread.8434 * @param pu32Dst Where to return the dword.8435 * @param iSegReg The index of the segment register to use for8436 * this access. The base and limits are checked.8437 * @param GCPtrMem The address of the guest memory.8438 */8439 VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8440 {8441 /* The lazy approach for now... */8442 uint8_t bUnmapInfo;8443 uint32_t const *pu32Src;8444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8445 if (rc == VINF_SUCCESS)8446 {8447 *pu32Dst = *pu32Src;8448 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8449 }8450 return rc;8451 }8452 8453 8454 /**8455 * Fetches a system table qword.8456 *8457 * @returns Strict VBox status code.8458 * @param pVCpu The cross context virtual CPU structure of the calling thread.8459 * @param pu64Dst Where to return the qword.8460 * @param iSegReg The index of the segment register to use for8461 * this access. The base and limits are checked.8462 * @param GCPtrMem The address of the guest memory.8463 */8464 VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8465 {8466 /* The lazy approach for now... 
*/8467 uint8_t bUnmapInfo;8468 uint64_t const *pu64Src;8469 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8470 if (rc == VINF_SUCCESS)8471 {8472 *pu64Dst = *pu64Src;8473 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8474 }8475 return rc;8476 }8477 8478 8479 /**8480 * Fetches a descriptor table entry with caller specified error code.8481 *8482 * @returns Strict VBox status code.8483 * @param pVCpu The cross context virtual CPU structure of the calling thread.8484 * @param pDesc Where to return the descriptor table entry.8485 * @param uSel The selector which table entry to fetch.8486 * @param uXcpt The exception to raise on table lookup error.8487 * @param uErrorCode The error code associated with the exception.8488 */8489 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,8490 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT8491 {8492 AssertPtr(pDesc);8493 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);8494 8495 /** @todo did the 286 require all 8 bytes to be accessible? */8496 /*8497 * Get the selector table base and check bounds.8498 */8499 RTGCPTR GCPtrBase;8500 if (uSel & X86_SEL_LDT)8501 {8502 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present8503 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )8504 {8505 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",8506 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));8507 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8508 uErrorCode, 0);8509 }8510 8511 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);8512 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;8513 }8514 else8515 {8516 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)8517 {8518 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));8519 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8520 uErrorCode, 0);8521 }8522 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;8523 }8524 8525 /*8526 * Read the legacy descriptor and maybe the long mode extensions if8527 * required.8528 */8529 VBOXSTRICTRC rcStrict;8530 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)8531 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));8532 else8533 {8534 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);8535 if (rcStrict == VINF_SUCCESS)8536 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);8537 if (rcStrict == VINF_SUCCESS)8538 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);8539 if (rcStrict == VINF_SUCCESS)8540 pDesc->Legacy.au16[3] = 0;8541 else8542 return rcStrict;8543 }8544 8545 if (rcStrict == VINF_SUCCESS)8546 {8547 if ( !IEM_IS_LONG_MODE(pVCpu)8548 || pDesc->Legacy.Gen.u1DescType)8549 pDesc->Long.au64[1] = 0;8550 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 88551 <= (uSel & X86_SEL_LDT ? 
pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))8552 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);8553 else8554 {8555 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));8556 /** @todo is this the right exception? */8557 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);8558 }8559 }8560 return rcStrict;8561 }8562 8563 8564 /**8565 * Fetches a descriptor table entry.8566 *8567 * @returns Strict VBox status code.8568 * @param pVCpu The cross context virtual CPU structure of the calling thread.8569 * @param pDesc Where to return the descriptor table entry.8570 * @param uSel The selector which table entry to fetch.8571 * @param uXcpt The exception to raise on table lookup error.8572 */8573 VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT8574 {8575 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);8576 }8577 8578 8579 /**8580 * Marks the selector descriptor as accessed (only non-system descriptors).8581 *8582 * This function ASSUMES that iemMemFetchSelDesc has be called previously and8583 * will therefore skip the limit checks.8584 *8585 * @returns Strict VBox status code.8586 * @param pVCpu The cross context virtual CPU structure of the calling thread.8587 * @param uSel The selector.8588 */8589 VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT8590 {8591 /*8592 * Get the selector table base and calculate the entry address.8593 */8594 RTGCPTR GCPtr = uSel & X86_SEL_LDT8595 ? pVCpu->cpum.GstCtx.ldtr.u64Base8596 : pVCpu->cpum.GstCtx.gdtr.pGdt;8597 GCPtr += uSel & X86_SEL_MASK;8598 8599 /*8600 * ASMAtomicBitSet will assert if the address is misaligned, so do some8601 * ugly stuff to avoid this. This will make sure it's an atomic access8602 * as well more or less remove any question about 8-bit or 32-bit accesss.8603 */8604 VBOXSTRICTRC rcStrict;8605 uint8_t bUnmapInfo;8606 uint32_t volatile *pu32;8607 if ((GCPtr & 3) == 0)8608 {8609 /* The normal case, map the 32-bit bits around the accessed bit (40). */8610 GCPtr += 2 + 2;8611 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8612 if (rcStrict != VINF_SUCCESS)8613 return rcStrict;8614 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceeded by u8BaseHigh1. */8615 }8616 else8617 {8618 /* The misaligned GDT/LDT case, map the whole thing. 
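/*
 * [Illustrative sketch] The accessed flag is bit 40 of the 8-byte descriptor,
 * i.e. bit 0 of the type byte at offset 5, which is why the aligned path above
 * maps the dword at offset 4 and sets bit 8 in it.  Standalone illustration on
 * a raw descriptor image (the real code does the update atomically):
 */
#include <stdint.h>

#define SK_DESC_ACCESSED_BIT    40u     /* absolute bit number within the descriptor */

static void skMarkDescriptorAccessed(uint8_t *pabDesc /* 8 bytes */)
{
    unsigned const iByte = SK_DESC_ACCESSED_BIT / 8;    /* = 5, the type/attribute byte */
    unsigned const iBit  = SK_DESC_ACCESSED_BIT % 8;    /* = 0, the accessed bit        */
    pabDesc[iByte] |= (uint8_t)(1u << iBit);
}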
*/8619 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8620 if (rcStrict != VINF_SUCCESS)8621 return rcStrict;8622 switch ((uintptr_t)pu32 & 3)8623 {8624 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;8625 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;8626 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;8627 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;8628 }8629 }8630 8631 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8632 }8633 8634 8635 #undef LOG_GROUP8636 #define LOG_GROUP LOG_GROUP_IEM8637 8638 /** @} */8639 8640 /** @name Opcode Helpers.8641 * @{8642 */8643 8644 /**8645 * Calculates the effective address of a ModR/M memory operand.8646 *8647 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8648 *8649 * @return Strict VBox status code.8650 * @param pVCpu The cross context virtual CPU structure of the calling thread.8651 * @param bRm The ModRM byte.8652 * @param cbImmAndRspOffset - First byte: The size of any immediate8653 * following the effective address opcode bytes8654 * (only for RIP relative addressing).8655 * - Second byte: RSP displacement (for POP [ESP]).8656 * @param pGCPtrEff Where to return the effective address.8657 */8658 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT8659 {8660 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));8661 # define SET_SS_DEF() \8662 do \8663 { \8664 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8665 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8666 } while (0)8667 8668 if (!IEM_IS_64BIT_CODE(pVCpu))8669 {8670 /** @todo Check the effective address size crap! */8671 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8672 {8673 uint16_t u16EffAddr;8674 8675 /* Handle the disp16 form with no registers first. */8676 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8677 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8678 else8679 {8680 /* Get the displacment. */8681 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8682 {8683 case 0: u16EffAddr = 0; break;8684 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8685 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8686 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */8687 }8688 8689 /* Add the base and index registers to the disp. */8690 switch (bRm & X86_MODRM_RM_MASK)8691 {8692 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8693 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8694 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8695 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8696 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8697 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8698 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8699 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8700 }8701 }8702 8703 *pGCPtrEff = u16EffAddr;8704 }8705 else8706 {8707 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8708 uint32_t u32EffAddr;8709 8710 /* Handle the disp32 form with no registers first. */8711 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8712 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);8713 else8714 {8715 /* Get the register (or SIB) value. 
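/*
 * [Illustrative sketch] The 16-bit ModR/M decoding above in one standalone
 * helper: mod selects the displacement size (mod=0 with rm=6 meaning a bare
 * disp16), rm selects the base/index pair, and the result wraps at 16 bits.
 * Register values are passed in explicitly and the caller is assumed to have
 * sign-extended a disp8 already; names are invented for illustration.
 */
#include <stdint.h>

typedef struct SKREGS16 { uint16_t bx, bp, si, di; } SKREGS16;

static uint16_t skCalc16BitEffAddr(uint8_t bMod, uint8_t bRm, uint16_t u16Disp, SKREGS16 const *pRegs)
{
    if (bMod == 0 && (bRm & 7) == 6)
        return u16Disp;                                     /* disp16 only, no registers */

    uint16_t uEffAddr;
    switch (bRm & 7)
    {
        case 0:  uEffAddr = pRegs->bx + pRegs->si; break;
        case 1:  uEffAddr = pRegs->bx + pRegs->di; break;
        case 2:  uEffAddr = pRegs->bp + pRegs->si; break;   /* SS is the default segment */
        case 3:  uEffAddr = pRegs->bp + pRegs->di; break;   /* SS is the default segment */
        case 4:  uEffAddr = pRegs->si;             break;
        case 5:  uEffAddr = pRegs->di;             break;
        case 6:  uEffAddr = pRegs->bp;             break;   /* SS is the default segment */
        default: uEffAddr = pRegs->bx;             break;
    }
    if (bMod == 1 || bMod == 2)                             /* disp8 (sign-extended) or disp16 */
        uEffAddr = (uint16_t)(uEffAddr + u16Disp);
    return uEffAddr;
}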
*/8716 switch ((bRm & X86_MODRM_RM_MASK))8717 {8718 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8719 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8720 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8721 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8722 case 4: /* SIB */8723 {8724 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8725 8726 /* Get the index and scale it. */8727 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)8728 {8729 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8730 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8731 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8732 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8733 case 4: u32EffAddr = 0; /*none */ break;8734 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;8735 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8736 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8737 IEM_NOT_REACHED_DEFAULT_CASE_RET();8738 }8739 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8740 8741 /* add base */8742 switch (bSib & X86_SIB_BASE_MASK)8743 {8744 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;8745 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;8746 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;8747 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;8748 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8749 case 5:8750 if ((bRm & X86_MODRM_MOD_MASK) != 0)8751 {8752 u32EffAddr += pVCpu->cpum.GstCtx.ebp;8753 SET_SS_DEF();8754 }8755 else8756 {8757 uint32_t u32Disp;8758 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8759 u32EffAddr += u32Disp;8760 }8761 break;8762 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;8763 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;8764 IEM_NOT_REACHED_DEFAULT_CASE_RET();8765 }8766 break;8767 }8768 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;8769 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8770 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8771 IEM_NOT_REACHED_DEFAULT_CASE_RET();8772 }8773 8774 /* Get and add the displacement. */8775 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8776 {8777 case 0:8778 break;8779 case 1:8780 {8781 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);8782 u32EffAddr += i8Disp;8783 break;8784 }8785 case 2:8786 {8787 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);8788 u32EffAddr += u32Disp;8789 break;8790 }8791 default:8792 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */8793 }8794 8795 }8796 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8797 *pGCPtrEff = u32EffAddr;8798 }8799 }8800 else8801 {8802 uint64_t u64EffAddr;8803 8804 /* Handle the rip+disp32 form with no registers first. */8805 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8806 {8807 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);8808 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));8809 }8810 else8811 {8812 /* Get the register (or SIB) value. 
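/*
 * [Illustrative sketch] The 32-bit SIB handling above condensed into a single
 * standalone helper: effective address = index * 2^scale + base + displacement,
 * where index=100b means "no index" and base=101b with mod=00 means "disp32
 * instead of EBP".  The caller supplies the eight GPRs in encoding order and
 * an already sign-extended disp8 where applicable; names are invented.
 */
#include <stdint.h>

static uint32_t skCalcSibEffAddr(uint8_t bSib, uint8_t bMod, uint32_t u32Disp,
                                 uint32_t const au32Gprs[8] /* EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI */)
{
    uint8_t const iIndex = (bSib >> 3) & 7;
    uint8_t const iBase  = bSib & 7;
    uint8_t const cShift = (bSib >> 6) & 3;

    uint32_t uEffAddr = iIndex != 4 ? au32Gprs[iIndex] << cShift : 0;   /* index=100b: none */

    if (iBase == 5 && bMod == 0)
        uEffAddr += u32Disp;                    /* no base register, a disp32 follows the SIB byte */
    else
        uEffAddr += au32Gprs[iBase];            /* base=100b is ESP and implies an SS default */

    if (bMod == 1 || bMod == 2)                 /* disp8 (sign-extended) or disp32 */
        uEffAddr += u32Disp;
    return uEffAddr;
}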
*/8813 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)8814 {8815 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8816 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8817 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8818 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8819 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;8820 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8821 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8822 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8823 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8824 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8825 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8826 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8827 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8828 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8829 /* SIB */8830 case 4:8831 case 12:8832 {8833 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8834 8835 /* Get the index and scale it. */8836 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)8837 {8838 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8839 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8840 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8841 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8842 case 4: u64EffAddr = 0; /*none */ break;8843 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;8844 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8845 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8846 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8847 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8848 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8849 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8850 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;8851 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8852 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8853 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8854 IEM_NOT_REACHED_DEFAULT_CASE_RET();8855 }8856 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8857 8858 /* add base */8859 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)8860 {8861 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;8862 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;8863 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;8864 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;8865 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8866 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;8867 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;8868 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;8869 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;8870 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;8871 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;8872 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;8873 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;8874 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;8875 /* complicated encodings */8876 case 5:8877 case 13:8878 if ((bRm & X86_MODRM_MOD_MASK) != 0)8879 {8880 if (!pVCpu->iem.s.uRexB)8881 {8882 u64EffAddr += pVCpu->cpum.GstCtx.rbp;8883 SET_SS_DEF();8884 }8885 else8886 u64EffAddr += pVCpu->cpum.GstCtx.r13;8887 }8888 else8889 {8890 uint32_t u32Disp;8891 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8892 u64EffAddr += (int32_t)u32Disp;8893 }8894 break;8895 IEM_NOT_REACHED_DEFAULT_CASE_RET();8896 }8897 break;8898 }8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();8900 }8901 8902 
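/*
 * [Illustrative sketch] In the RIP-relative form the disp32 is relative to the
 * address of the next instruction, but the decoder has not read the whole
 * instruction yet when it computes the address, so the code above adds the
 * bytes decoded so far plus the size of any immediate that still follows the
 * displacement (the low byte of cbImmAndRspOffset).  Condensed, with invented
 * parameter names:
 */
#include <stdint.h>

static uint64_t skCalcRipRelAddr(uint64_t uInstrRip, uint8_t cbDecodedInclDisp, uint8_t cbImmFollowing, int32_t i32Disp)
{
    uint64_t const uRipNext = uInstrRip + cbDecodedInclDisp + cbImmFollowing;   /* end of the instruction */
    return uRipNext + (uint64_t)(int64_t)i32Disp;                               /* sign-extended disp32   */
}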
/* Get and add the displacement. */8903 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8904 {8905 case 0:8906 break;8907 case 1:8908 {8909 int8_t i8Disp;8910 IEM_OPCODE_GET_NEXT_S8(&i8Disp);8911 u64EffAddr += i8Disp;8912 break;8913 }8914 case 2:8915 {8916 uint32_t u32Disp;8917 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8918 u64EffAddr += (int32_t)u32Disp;8919 break;8920 }8921 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */8922 }8923 8924 }8925 8926 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)8927 *pGCPtrEff = u64EffAddr;8928 else8929 {8930 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8931 *pGCPtrEff = u64EffAddr & UINT32_MAX;8932 }8933 }8934 8935 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));8936 return VINF_SUCCESS;8937 }8938 8939 8940 #ifdef IEM_WITH_SETJMP8941 /**8942 * Calculates the effective address of a ModR/M memory operand.8943 *8944 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8945 *8946 * May longjmp on internal error.8947 *8948 * @return The effective address.8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.8950 * @param bRm The ModRM byte.8951 * @param cbImmAndRspOffset - First byte: The size of any immediate8952 * following the effective address opcode bytes8953 * (only for RIP relative addressing).8954 * - Second byte: RSP displacement (for POP [ESP]).8955 */8956 RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP8957 {8958 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));8959 # define SET_SS_DEF() \8960 do \8961 { \8962 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8963 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8964 } while (0)8965 8966 if (!IEM_IS_64BIT_CODE(pVCpu))8967 {8968 /** @todo Check the effective address size crap! */8969 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8970 {8971 uint16_t u16EffAddr;8972 8973 /* Handle the disp16 form with no registers first. */8974 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8975 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8976 else8977 {8978 /* Get the displacment. */8979 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8980 {8981 case 0: u16EffAddr = 0; break;8982 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8983 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8984 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */8985 }8986 8987 /* Add the base and index registers to the disp. */8988 switch (bRm & X86_MODRM_RM_MASK)8989 {8990 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8991 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8992 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8993 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8994 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8995 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8996 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8997 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8998 }8999 }9000 9001 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));9002 return u16EffAddr;9003 }9004 9005 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9006 uint32_t u32EffAddr;9007 9008 /* Handle the disp32 form with no registers first. 
*/9009 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9010 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9011 else9012 {9013 /* Get the register (or SIB) value. */9014 switch ((bRm & X86_MODRM_RM_MASK))9015 {9016 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9017 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9018 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9019 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9020 case 4: /* SIB */9021 {9022 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9023 9024 /* Get the index and scale it. */9025 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9026 {9027 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9028 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9029 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9030 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9031 case 4: u32EffAddr = 0; /*none */ break;9032 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9033 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9034 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9035 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9036 }9037 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9038 9039 /* add base */9040 switch (bSib & X86_SIB_BASE_MASK)9041 {9042 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9043 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9044 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9045 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9046 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9047 case 5:9048 if ((bRm & X86_MODRM_MOD_MASK) != 0)9049 {9050 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9051 SET_SS_DEF();9052 }9053 else9054 {9055 uint32_t u32Disp;9056 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9057 u32EffAddr += u32Disp;9058 }9059 break;9060 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9061 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9062 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9063 }9064 break;9065 }9066 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9067 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9068 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9069 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9070 }9071 9072 /* Get and add the displacement. */9073 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9074 {9075 case 0:9076 break;9077 case 1:9078 {9079 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9080 u32EffAddr += i8Disp;9081 break;9082 }9083 case 2:9084 {9085 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9086 u32EffAddr += u32Disp;9087 break;9088 }9089 default:9090 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */9091 }9092 }9093 9094 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9095 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));9096 return u32EffAddr;9097 }9098 9099 uint64_t u64EffAddr;9100 9101 /* Handle the rip+disp32 form with no registers first. */9102 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9103 {9104 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9105 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9106 }9107 else9108 {9109 /* Get the register (or SIB) value. 
*/9110 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9111 {9112 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9113 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9114 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9115 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9116 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9117 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9118 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9119 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9120 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9121 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9122 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9123 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9124 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9125 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9126 /* SIB */9127 case 4:9128 case 12:9129 {9130 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9131 9132 /* Get the index and scale it. */9133 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9134 {9135 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9136 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9137 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9138 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9139 case 4: u64EffAddr = 0; /*none */ break;9140 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9141 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9142 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9143 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9144 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9145 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9146 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9147 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9148 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9149 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9150 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9151 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9152 }9153 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9154 9155 /* add base */9156 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9157 {9158 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9159 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9160 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9161 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9162 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9163 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9164 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9165 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9166 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9167 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9168 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9169 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9170 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9171 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9172 /* complicated encodings */9173 case 5:9174 case 13:9175 if ((bRm & X86_MODRM_MOD_MASK) != 0)9176 {9177 if (!pVCpu->iem.s.uRexB)9178 {9179 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9180 SET_SS_DEF();9181 }9182 else9183 u64EffAddr += pVCpu->cpum.GstCtx.r13;9184 }9185 else9186 {9187 uint32_t u32Disp;9188 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9189 u64EffAddr += (int32_t)u32Disp;9190 }9191 break;9192 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9193 }9194 break;9195 }9196 
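/*
 * [Illustrative sketch] The "complicated encodings" above: an SIB base field
 * of 101b normally selects rBP (or r13 when REX.B is set), except with mod=00,
 * where there is no base register and a disp32 follows instead.  Standalone
 * decision helper returning the register number, or -1 for "disp32 only":
 */
#include <stdint.h>

static int skSibBaseRegister(uint8_t bMod, uint8_t bSibBase /* 0..7 */, unsigned fRexB)
{
    if ((bSibBase & 7) == 5 && bMod == 0)
        return -1;                                      /* no base register, disp32 only */
    return (int)((bSibBase & 7) | (fRexB ? 8 : 0));     /* e.g. 5 -> rBP, 5+REX.B -> r13 */
}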
IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9197 }9198 9199 /* Get and add the displacement. */9200 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9201 {9202 case 0:9203 break;9204 case 1:9205 {9206 int8_t i8Disp;9207 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9208 u64EffAddr += i8Disp;9209 break;9210 }9211 case 2:9212 {9213 uint32_t u32Disp;9214 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9215 u64EffAddr += (int32_t)u32Disp;9216 break;9217 }9218 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */9219 }9220 9221 }9222 9223 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9224 {9225 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));9226 return u64EffAddr;9227 }9228 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9229 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));9230 return u64EffAddr & UINT32_MAX;9231 }9232 #endif /* IEM_WITH_SETJMP */9233 9234 9235 /**9236 * Calculates the effective address of a ModR/M memory operand, extended version9237 * for use in the recompilers.9238 *9239 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.9240 *9241 * @return Strict VBox status code.9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.9243 * @param bRm The ModRM byte.9244 * @param cbImmAndRspOffset - First byte: The size of any immediate9245 * following the effective address opcode bytes9246 * (only for RIP relative addressing).9247 * - Second byte: RSP displacement (for POP [ESP]).9248 * @param pGCPtrEff Where to return the effective address.9249 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and9250 * SIB byte (bits 39:32).9251 */9252 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT9253 {9254 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));9255 # define SET_SS_DEF() \9256 do \9257 { \9258 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \9259 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \9260 } while (0)9261 9262 uint64_t uInfo;9263 if (!IEM_IS_64BIT_CODE(pVCpu))9264 {9265 /** @todo Check the effective address size crap! */9266 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)9267 {9268 uint16_t u16EffAddr;9269 9270 /* Handle the disp16 form with no registers first. */9271 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)9272 {9273 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);9274 uInfo = u16EffAddr;9275 }9276 else9277 {9278 /* Get the displacment. */9279 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9280 {9281 case 0: u16EffAddr = 0; break;9282 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;9283 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;9284 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */9285 }9286 uInfo = u16EffAddr;9287 9288 /* Add the base and index registers to the disp. 
*/9289 switch (bRm & X86_MODRM_RM_MASK)9290 {9291 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;9292 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;9293 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;9294 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;9295 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;9296 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;9297 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;9298 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;9299 }9300 }9301 9302 *pGCPtrEff = u16EffAddr;9303 }9304 else9305 {9306 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9307 uint32_t u32EffAddr;9308 9309 /* Handle the disp32 form with no registers first. */9310 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9311 {9312 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9313 uInfo = u32EffAddr;9314 }9315 else9316 {9317 /* Get the register (or SIB) value. */9318 uInfo = 0;9319 switch ((bRm & X86_MODRM_RM_MASK))9320 {9321 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9322 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9323 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9324 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9325 case 4: /* SIB */9326 {9327 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9328 uInfo = (uint64_t)bSib << 32;9329 9330 /* Get the index and scale it. */9331 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9332 {9333 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9334 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9335 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9336 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9337 case 4: u32EffAddr = 0; /*none */ break;9338 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9339 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9340 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9341 IEM_NOT_REACHED_DEFAULT_CASE_RET();9342 }9343 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9344 9345 /* add base */9346 switch (bSib & X86_SIB_BASE_MASK)9347 {9348 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9349 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9350 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9351 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9352 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9353 case 5:9354 if ((bRm & X86_MODRM_MOD_MASK) != 0)9355 {9356 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9357 SET_SS_DEF();9358 }9359 else9360 {9361 uint32_t u32Disp;9362 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9363 u32EffAddr += u32Disp;9364 uInfo |= u32Disp;9365 }9366 break;9367 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9368 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9369 IEM_NOT_REACHED_DEFAULT_CASE_RET();9370 }9371 break;9372 }9373 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9374 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9375 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9376 IEM_NOT_REACHED_DEFAULT_CASE_RET();9377 }9378 9379 /* Get and add the displacement. 
*/9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9381 {9382 case 0:9383 break;9384 case 1:9385 {9386 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9387 u32EffAddr += i8Disp;9388 uInfo |= (uint32_t)(int32_t)i8Disp;9389 break;9390 }9391 case 2:9392 {9393 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9394 u32EffAddr += u32Disp;9395 uInfo |= (uint32_t)u32Disp;9396 break;9397 }9398 default:9399 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */9400 }9401 9402 }9403 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9404 *pGCPtrEff = u32EffAddr;9405 }9406 }9407 else9408 {9409 uint64_t u64EffAddr;9410 9411 /* Handle the rip+disp32 form with no registers first. */9412 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9413 {9414 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9415 uInfo = (uint32_t)u64EffAddr;9416 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9417 }9418 else9419 {9420 /* Get the register (or SIB) value. */9421 uInfo = 0;9422 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9423 {9424 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9425 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9426 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9427 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9428 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9429 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9430 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9431 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9432 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9433 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9434 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9435 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9436 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9437 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9438 /* SIB */9439 case 4:9440 case 12:9441 {9442 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9443 uInfo = (uint64_t)bSib << 32;9444 9445 /* Get the index and scale it. 
*/9446 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9447 {9448 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9449 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9450 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9451 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9452 case 4: u64EffAddr = 0; /*none */ break;9453 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9454 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9455 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9456 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9457 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9458 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9459 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9460 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9461 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9462 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9463 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9464 IEM_NOT_REACHED_DEFAULT_CASE_RET();9465 }9466 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9467 9468 /* add base */9469 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9470 {9471 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9472 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9473 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9474 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9475 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9476 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9477 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9478 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9479 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9480 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9481 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9482 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9483 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9484 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9485 /* complicated encodings */9486 case 5:9487 case 13:9488 if ((bRm & X86_MODRM_MOD_MASK) != 0)9489 {9490 if (!pVCpu->iem.s.uRexB)9491 {9492 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9493 SET_SS_DEF();9494 }9495 else9496 u64EffAddr += pVCpu->cpum.GstCtx.r13;9497 }9498 else9499 {9500 uint32_t u32Disp;9501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9502 u64EffAddr += (int32_t)u32Disp;9503 uInfo |= u32Disp;9504 }9505 break;9506 IEM_NOT_REACHED_DEFAULT_CASE_RET();9507 }9508 break;9509 }9510 IEM_NOT_REACHED_DEFAULT_CASE_RET();9511 }9512 9513 /* Get and add the displacement. 
*/9514 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9515 {9516 case 0:9517 break;9518 case 1:9519 {9520 int8_t i8Disp;9521 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9522 u64EffAddr += i8Disp;9523 uInfo |= (uint32_t)(int32_t)i8Disp;9524 break;9525 }9526 case 2:9527 {9528 uint32_t u32Disp;9529 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9530 u64EffAddr += (int32_t)u32Disp;9531 uInfo |= u32Disp;9532 break;9533 }9534 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */9535 }9536 9537 }9538 9539 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9540 *pGCPtrEff = u64EffAddr;9541 else9542 {9543 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9544 *pGCPtrEff = u64EffAddr & UINT32_MAX;9545 }9546 }9547 *puInfo = uInfo;9548 9549 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));9550 return VINF_SUCCESS;9551 }9552 9553 /** @} */9554 9555 9556 #ifdef LOG_ENABLED9557 /**9558 * Logs the current instruction.9559 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9560 * @param fSameCtx Set if we have the same context information as the VMM,9561 * clear if we may have already executed an instruction in9562 * our debug context. When clear, we assume IEMCPU holds9563 * valid CPU mode info.9564 *9565 * The @a fSameCtx parameter is now misleading and obsolete.9566 * @param pszFunction The IEM function doing the execution.9567 */9568 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT9569 {9570 # ifdef IN_RING39571 if (LogIs2Enabled())9572 {9573 char szInstr[256];9574 uint32_t cbInstr = 0;9575 if (fSameCtx)9576 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,9577 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,9578 szInstr, sizeof(szInstr), &cbInstr);9579 else9580 {9581 uint32_t fFlags = 0;9582 switch (IEM_GET_CPU_MODE(pVCpu))9583 {9584 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;9585 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;9586 case IEMMODE_16BIT:9587 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)9588 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;9589 else9590 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;9591 break;9592 }9593 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,9594 szInstr, sizeof(szInstr), &cbInstr);9595 }9596 9597 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;9598 Log2(("**** %s fExec=%x\n"9599 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"9600 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"9601 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"9602 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"9603 " %s\n"9604 , pszFunction, pVCpu->iem.s.fExec,9605 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,9606 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,9607 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,9608 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,9609 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,9610 szInstr));9611 9612 /* This stuff sucks atm. as it fills the log with MSRs. 
*/9613 //if (LogIs3Enabled())9614 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);9615 }9616 else9617 # endif9618 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,9619 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));9620 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);9621 }9622 #endif /* LOG_ENABLED */9623 9624 9625 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9626 /**9627 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,9628 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.9629 *9630 * @returns Modified rcStrict.9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.9632 * @param rcStrict The instruction execution status.9633 */9634 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT9635 {9636 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));9637 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))9638 {9639 /* VMX preemption timer takes priority over NMI-window exits. */9640 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))9641 {9642 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);9643 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));9644 }9645 /*9646 * Check remaining intercepts.9647 *9648 * NMI-window and Interrupt-window VM-exits.9649 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.9650 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.9651 *9652 * See Intel spec. 26.7.6 "NMI-Window Exiting".9653 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".9654 */9655 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)9656 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9657 && !TRPMHasTrap(pVCpu))9658 {9659 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));9660 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)9661 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))9662 {9663 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);9664 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));9665 }9666 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)9667 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))9668 {9669 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);9670 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));9671 }9672 }9673 }9674 /* TPR-below threshold/APIC write has the highest priority. */9675 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))9676 {9677 rcStrict = iemVmxApicWriteEmulation(pVCpu);9678 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9679 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));9680 }9681 /* MTF takes priority over VMX-preemption timer. 
*/9682 else9683 {9684 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);9685 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9686 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));9687 }9688 return rcStrict;9689 }9690 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */9691 9692 9693 /**9694 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,9695 * IEMExecOneBypass and friends.9696 *9697 * Similar code is found in IEMExecLots.9698 *9699 * @return Strict VBox status code.9700 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9701 * @param fExecuteInhibit If set, execute the instruction following CLI,9702 * POP SS and MOV SS,GR.9703 * @param pszFunction The calling function name.9704 */9705 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)9706 {9707 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9708 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9709 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9710 RT_NOREF_PV(pszFunction);9711 9712 #ifdef IEM_WITH_SETJMP9713 VBOXSTRICTRC rcStrict;9714 IEM_TRY_SETJMP(pVCpu, rcStrict)9715 {9716 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9717 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9718 }9719 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9720 {9721 pVCpu->iem.s.cLongJumps++;9722 }9723 IEM_CATCH_LONGJMP_END(pVCpu);9724 #else9725 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9726 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9727 #endif9728 if (rcStrict == VINF_SUCCESS)9729 pVCpu->iem.s.cInstructions++;9730 if (pVCpu->iem.s.cActiveMappings > 0)9731 {9732 Assert(rcStrict != VINF_SUCCESS);9733 iemMemRollback(pVCpu);9734 }9735 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9736 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9737 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9738 9739 //#ifdef DEBUG9740 // AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));9741 //#endif9742 9743 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9744 /*9745 * Perform any VMX nested-guest instruction boundary actions.9746 *9747 * If any of these causes a VM-exit, we must skip executing the next9748 * instruction (would run into stale page tables). A VM-exit makes sure9749 * there is no interrupt-inhibition, so that should ensure we don't go9750 * to try execute the next instruction. 
Clearing fExecuteInhibit is9751 * problematic because of the setjmp/longjmp clobbering above.9752 */9753 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9754 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)9755 || rcStrict != VINF_SUCCESS)9756 { /* likely */ }9757 else9758 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9759 #endif9760 9761 /* Execute the next instruction as well if a cli, pop ss or9762 mov ss, Gr has just completed successfully. */9763 if ( fExecuteInhibit9764 && rcStrict == VINF_SUCCESS9765 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))9766 {9767 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));9768 if (rcStrict == VINF_SUCCESS)9769 {9770 #ifdef LOG_ENABLED9771 iemLogCurInstr(pVCpu, false, pszFunction);9772 #endif9773 #ifdef IEM_WITH_SETJMP9774 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)9775 {9776 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9777 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9778 }9779 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9780 {9781 pVCpu->iem.s.cLongJumps++;9782 }9783 IEM_CATCH_LONGJMP_END(pVCpu);9784 #else9785 IEM_OPCODE_GET_FIRST_U8(&b);9786 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9787 #endif9788 if (rcStrict == VINF_SUCCESS)9789 {9790 pVCpu->iem.s.cInstructions++;9791 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9792 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9793 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))9794 { /* likely */ }9795 else9796 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9797 #endif9798 }9799 if (pVCpu->iem.s.cActiveMappings > 0)9800 {9801 Assert(rcStrict != VINF_SUCCESS);9802 iemMemRollback(pVCpu);9803 }9804 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9805 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9806 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9807 }9808 else if (pVCpu->iem.s.cActiveMappings > 0)9809 iemMemRollback(pVCpu);9810 /** @todo drop this after we bake this change into RIP advancing. */9811 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... 
*/9812 }9813 9814 /*9815 * Return value fiddling, statistics and sanity assertions.9816 */9817 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);9818 9819 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));9820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));9821 return rcStrict;9822 }9823 9824 9825 /**9826 * Execute one instruction.9827 *9828 * @return Strict VBox status code.9829 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9830 */9831 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)9832 {9833 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */9834 #ifdef LOG_ENABLED9835 iemLogCurInstr(pVCpu, true, "IEMExecOne");9836 #endif9837 9838 /*9839 * Do the decoding and emulation.9840 */9841 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9842 if (rcStrict == VINF_SUCCESS)9843 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");9844 else if (pVCpu->iem.s.cActiveMappings > 0)9845 iemMemRollback(pVCpu);9846 9847 if (rcStrict != VINF_SUCCESS)9848 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9850 return rcStrict;9851 }9852 9853 9854 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9855 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9856 {9857 VBOXSTRICTRC rcStrict;9858 if ( cbOpcodeBytes9859 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9860 {9861 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);9862 #ifdef IEM_WITH_CODE_TLB9863 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9864 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9865 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9866 pVCpu->iem.s.offCurInstrStart = 0;9867 pVCpu->iem.s.offInstrNextByte = 0;9868 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9869 #else9870 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9871 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9872 #endif9873 rcStrict = VINF_SUCCESS;9874 }9875 else9876 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9877 if (rcStrict == VINF_SUCCESS)9878 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");9879 else if (pVCpu->iem.s.cActiveMappings > 0)9880 iemMemRollback(pVCpu);9881 9882 return rcStrict;9883 }9884 9885 9886 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)9887 {9888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9889 if (rcStrict == VINF_SUCCESS)9890 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");9891 else if (pVCpu->iem.s.cActiveMappings > 0)9892 iemMemRollback(pVCpu);9893 9894 return rcStrict;9895 }9896 9897 9898 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9899 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9900 {9901 VBOXSTRICTRC rcStrict;9902 if ( cbOpcodeBytes9903 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9904 {9905 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);9906 #ifdef IEM_WITH_CODE_TLB9907 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9908 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9909 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9910 
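/* Illustration only, not part of the changeset (plain C, invented names): what
   the "split cacheline lock" case handled by IEMExecOneIgnoreLock a little
   further down refers to - a LOCKed access whose bytes straddle a cache-line
   boundary (assumed 64 bytes, the common size on current x86 hosts). */
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 64u /* assumption: typical x86 cache-line size */

static int isSplitLockCandidate(uint64_t uAddr, unsigned cbAccess)
{
    /* The locked access splits a line when its first and last byte fall into
       different cache lines. */
    return (uAddr / CACHE_LINE_SIZE) != ((uAddr + cbAccess - 1) / CACHE_LINE_SIZE);
}

int main(void)
{
    printf("0x103c/8: %d\n", isSplitLockCandidate(UINT64_C(0x103c), 8)); /* crosses 0x1040 -> 1 */
    printf("0x1040/8: %d\n", isSplitLockCandidate(UINT64_C(0x1040), 8)); /* stays in one line -> 0 */
    return 0;
}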
pVCpu->iem.s.offCurInstrStart = 0;9911 pVCpu->iem.s.offInstrNextByte = 0;9912 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9913 #else9914 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9915 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9916 #endif9917 rcStrict = VINF_SUCCESS;9918 }9919 else9920 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9921 if (rcStrict == VINF_SUCCESS)9922 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");9923 else if (pVCpu->iem.s.cActiveMappings > 0)9924 iemMemRollback(pVCpu);9925 9926 return rcStrict;9927 }9928 9929 9930 /**9931 * For handling split cacheline lock operations when the host has split-lock9932 * detection enabled.9933 *9934 * This will cause the interpreter to disregard the lock prefix and implicit9935 * locking (xchg).9936 *9937 * @returns Strict VBox status code.9938 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9939 */9940 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)9941 {9942 /*9943 * Do the decoding and emulation.9944 */9945 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);9946 if (rcStrict == VINF_SUCCESS)9947 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");9948 else if (pVCpu->iem.s.cActiveMappings > 0)9949 iemMemRollback(pVCpu);9950 9951 if (rcStrict != VINF_SUCCESS)9952 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9953 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9954 return rcStrict;9955 }9956 9957 9958 /**9959 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to9960 * inject a pending TRPM trap.9961 */9962 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)9963 {9964 Assert(TRPMHasTrap(pVCpu));9965 9966 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9967 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))9968 {9969 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */9970 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)9971 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);9972 if (fIntrEnabled)9973 {9974 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))9975 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9976 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))9977 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));9978 else9979 {9980 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));9981 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));9982 }9983 }9984 #else9985 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9986 #endif9987 if (fIntrEnabled)9988 {9989 uint8_t u8TrapNo;9990 TRPMEVENT enmType;9991 uint32_t uErrCode;9992 RTGCPTR uCr2;9993 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);9994 AssertRC(rc2);9995 Assert(enmType == TRPM_HARDWARE_INT);9996 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);9997 9998 TRPMResetTrap(pVCpu);9999 10000 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10001 /* Injecting an event may cause a VM-exit. 
*/10002 if ( rcStrict != VINF_SUCCESS10003 && rcStrict != VINF_IEM_RAISED_XCPT)10004 return iemExecStatusCodeFiddling(pVCpu, rcStrict);10005 #else10006 NOREF(rcStrict);10007 #endif10008 }10009 }10010 10011 return VINF_SUCCESS;10012 }10013 10014 10015 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)10016 {10017 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;10018 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));10019 Assert(cMaxInstructions > 0);10020 10021 /*10022 * See if there is an interrupt pending in TRPM, inject it if we can.10023 */10024 /** @todo What if we are injecting an exception and not an interrupt? Is that10025 * possible here? For now we assert it is indeed only an interrupt. */10026 if (!TRPMHasTrap(pVCpu))10027 { /* likely */ }10028 else10029 {10030 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);10031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10032 { /*likely */ }10033 else10034 return rcStrict;10035 }10036 10037 /*10038 * Initial decoder init w/ prefetch, then setup setjmp.10039 */10040 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10041 if (rcStrict == VINF_SUCCESS)10042 {10043 #ifdef IEM_WITH_SETJMP10044 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */10045 IEM_TRY_SETJMP(pVCpu, rcStrict)10046 #endif10047 {10048 /*10049 * The run loop. We limit ourselves to 4096 instructions right now.10050 */10051 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;10052 PVMCC pVM = pVCpu->CTX_SUFF(pVM);10053 for (;;)10054 {10055 /*10056 * Log the state.10057 */10058 #ifdef LOG_ENABLED10059 iemLogCurInstr(pVCpu, true, "IEMExecLots");10060 #endif10061 10062 /*10063 * Do the decoding and emulation.10064 */10065 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10066 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10067 #ifdef VBOX_STRICT10068 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);10069 #endif10070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10071 {10072 Assert(pVCpu->iem.s.cActiveMappings == 0);10073 pVCpu->iem.s.cInstructions++;10074 10075 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10076 /* Perform any VMX nested-guest instruction boundary actions. */10077 uint64_t fCpu = pVCpu->fLocalForcedActions;10078 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10079 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10080 { /* likely */ }10081 else10082 {10083 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10084 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10085 fCpu = pVCpu->fLocalForcedActions;10086 else10087 {10088 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10089 break;10090 }10091 }10092 #endif10093 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10094 {10095 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10096 uint64_t fCpu = pVCpu->fLocalForcedActions;10097 #endif10098 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310099 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10100 | VMCPU_FF_TLB_FLUSH10101 | VMCPU_FF_UNHALT );10102 10103 if (RT_LIKELY( ( !fCpu10104 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10105 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )10106 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))10107 {10108 if (--cMaxInstructionsGccStupidity > 0)10109 {10110 /* Poll timers every now an then according to the caller's specs. 
*/10111 if ( (cMaxInstructionsGccStupidity & cPollRate) != 010112 || !TMTimerPollBool(pVM, pVCpu))10113 {10114 Assert(pVCpu->iem.s.cActiveMappings == 0);10115 iemReInitDecoder(pVCpu);10116 continue;10117 }10118 }10119 }10120 }10121 Assert(pVCpu->iem.s.cActiveMappings == 0);10122 }10123 else if (pVCpu->iem.s.cActiveMappings > 0)10124 iemMemRollback(pVCpu);10125 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10126 break;10127 }10128 }10129 #ifdef IEM_WITH_SETJMP10130 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10131 {10132 if (pVCpu->iem.s.cActiveMappings > 0)10133 iemMemRollback(pVCpu);10134 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10135 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10136 # endif10137 pVCpu->iem.s.cLongJumps++;10138 }10139 IEM_CATCH_LONGJMP_END(pVCpu);10140 #endif10141 10142 /*10143 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).10144 */10145 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10147 }10148 else10149 {10150 if (pVCpu->iem.s.cActiveMappings > 0)10151 iemMemRollback(pVCpu);10152 10153 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10154 /*10155 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10156 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10157 */10158 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10159 #endif10160 }10161 10162 /*10163 * Maybe re-enter raw-mode and log.10164 */10165 if (rcStrict != VINF_SUCCESS)10166 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",10167 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));10168 if (pcInstructions)10169 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;10170 return rcStrict;10171 }10172 10173 10174 /**10175 * Interface used by EMExecuteExec, does exit statistics and limits.10176 *10177 * @returns Strict VBox status code.10178 * @param pVCpu The cross context virtual CPU structure.10179 * @param fWillExit To be defined.10180 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.10181 * @param cMaxInstructions Maximum number of instructions to execute.10182 * @param cMaxInstructionsWithoutExits10183 * The max number of instructions without exits.10184 * @param pStats Where to return statistics.10185 */10186 VMM_INT_DECL(VBOXSTRICTRC)10187 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,10188 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)10189 {10190 NOREF(fWillExit); /** @todo define flexible exit crits */10191 10192 /*10193 * Initialize return stats.10194 */10195 pStats->cInstructions = 0;10196 pStats->cExits = 0;10197 pStats->cMaxExitDistance = 0;10198 pStats->cReserved = 0;10199 10200 /*10201 * Initial decoder init w/ prefetch, then setup setjmp.10202 */10203 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10204 if (rcStrict == VINF_SUCCESS)10205 {10206 #ifdef IEM_WITH_SETJMP10207 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? 
*/10208 IEM_TRY_SETJMP(pVCpu, rcStrict)10209 #endif10210 {10211 #ifdef IN_RING010212 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);10213 #endif10214 uint32_t cInstructionSinceLastExit = 0;10215 10216 /*10217 * The run loop. We limit ourselves to 4096 instructions right now.10218 */10219 PVM pVM = pVCpu->CTX_SUFF(pVM);10220 for (;;)10221 {10222 /*10223 * Log the state.10224 */10225 #ifdef LOG_ENABLED10226 iemLogCurInstr(pVCpu, true, "IEMExecForExits");10227 #endif10228 10229 /*10230 * Do the decoding and emulation.10231 */10232 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;10233 10234 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10235 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10236 10237 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits10238 && cInstructionSinceLastExit > 0 /* don't count the first */ )10239 {10240 pStats->cExits += 1;10241 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)10242 pStats->cMaxExitDistance = cInstructionSinceLastExit;10243 cInstructionSinceLastExit = 0;10244 }10245 10246 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10247 {10248 Assert(pVCpu->iem.s.cActiveMappings == 0);10249 pVCpu->iem.s.cInstructions++;10250 pStats->cInstructions++;10251 cInstructionSinceLastExit++;10252 10253 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10254 /* Perform any VMX nested-guest instruction boundary actions. */10255 uint64_t fCpu = pVCpu->fLocalForcedActions;10256 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10257 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10258 { /* likely */ }10259 else10260 {10261 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10263 fCpu = pVCpu->fLocalForcedActions;10264 else10265 {10266 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10267 break;10268 }10269 }10270 #endif10271 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10272 {10273 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10274 uint64_t fCpu = pVCpu->fLocalForcedActions;10275 #endif10276 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310277 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10278 | VMCPU_FF_TLB_FLUSH10279 | VMCPU_FF_UNHALT );10280 if (RT_LIKELY( ( ( !fCpu10281 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10282 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))10283 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )10284 || pStats->cInstructions < cMinInstructions))10285 {10286 if (pStats->cInstructions < cMaxInstructions)10287 {10288 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)10289 {10290 #ifdef IN_RING010291 if ( !fCheckPreemptionPending10292 || !RTThreadPreemptIsPending(NIL_RTTHREAD))10293 #endif10294 {10295 Assert(pVCpu->iem.s.cActiveMappings == 0);10296 iemReInitDecoder(pVCpu);10297 continue;10298 }10299 #ifdef IN_RING010300 rcStrict = VINF_EM_RAW_INTERRUPT;10301 break;10302 #endif10303 }10304 }10305 }10306 Assert(!(fCpu & VMCPU_FF_IEM));10307 }10308 Assert(pVCpu->iem.s.cActiveMappings == 0);10309 }10310 else if (pVCpu->iem.s.cActiveMappings > 0)10311 iemMemRollback(pVCpu);10312 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10313 break;10314 }10315 }10316 #ifdef IEM_WITH_SETJMP10317 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10318 {10319 if (pVCpu->iem.s.cActiveMappings > 0)10320 iemMemRollback(pVCpu);10321 pVCpu->iem.s.cLongJumps++;10322 }10323 IEM_CATCH_LONGJMP_END(pVCpu);10324 #endif10325 10326 /*10327 * Assert hidden register sanity (also done in 
iemInitDecoder and iemReInitDecoder).10328 */10329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10331 }10332 else10333 {10334 if (pVCpu->iem.s.cActiveMappings > 0)10335 iemMemRollback(pVCpu);10336 10337 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10338 /*10339 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10340 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10341 */10342 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10343 #endif10344 }10345 10346 /*10347 * Maybe re-enter raw-mode and log.10348 */10349 if (rcStrict != VINF_SUCCESS)10350 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",10351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,10352 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));10353 return rcStrict;10354 }10355 10356 10357 /**10358 * Injects a trap, fault, abort, software interrupt or external interrupt.10359 *10360 * The parameter list matches TRPMQueryTrapAll pretty closely.10361 *10362 * @returns Strict VBox status code.10363 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10364 * @param u8TrapNo The trap number.10365 * @param enmType What type is it (trap/fault/abort), software10366 * interrupt or hardware interrupt.10367 * @param uErrCode The error code if applicable.10368 * @param uCr2 The CR2 value if applicable.10369 * @param cbInstr The instruction length (only relevant for10370 * software interrupts).10371 */10372 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,10373 uint8_t cbInstr)10374 {10375 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */10376 #ifdef DBGFTRACE_ENABLED10377 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",10378 u8TrapNo, enmType, uErrCode, uCr2);10379 #endif10380 10381 uint32_t fFlags;10382 switch (enmType)10383 {10384 case TRPM_HARDWARE_INT:10385 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));10386 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;10387 uErrCode = uCr2 = 0;10388 break;10389 10390 case TRPM_SOFTWARE_INT:10391 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));10392 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;10393 uErrCode = uCr2 = 0;10394 break;10395 10396 case TRPM_TRAP:10397 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. 
*/10398 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));10399 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;10400 if (u8TrapNo == X86_XCPT_PF)10401 fFlags |= IEM_XCPT_FLAGS_CR2;10402 switch (u8TrapNo)10403 {10404 case X86_XCPT_DF:10405 case X86_XCPT_TS:10406 case X86_XCPT_NP:10407 case X86_XCPT_SS:10408 case X86_XCPT_PF:10409 case X86_XCPT_AC:10410 case X86_XCPT_GP:10411 fFlags |= IEM_XCPT_FLAGS_ERR;10412 break;10413 }10414 break;10415 10416 IEM_NOT_REACHED_DEFAULT_CASE_RET();10417 }10418 10419 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);10420 10421 if (pVCpu->iem.s.cActiveMappings > 0)10422 iemMemRollback(pVCpu);10423 10424 return rcStrict;10425 }10426 10427 10428 /**10429 * Injects the active TRPM event.10430 *10431 * @returns Strict VBox status code.10432 * @param pVCpu The cross context virtual CPU structure.10433 */10434 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)10435 {10436 #ifndef IEM_IMPLEMENTS_TASKSWITCH10437 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));10438 #else10439 uint8_t u8TrapNo;10440 TRPMEVENT enmType;10441 uint32_t uErrCode;10442 RTGCUINTPTR uCr2;10443 uint8_t cbInstr;10444 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);10445 if (RT_FAILURE(rc))10446 return rc;10447 10448 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle10449 * ICEBP \#DB injection as a special case. */10450 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);10451 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM10452 if (rcStrict == VINF_SVM_VMEXIT)10453 rcStrict = VINF_SUCCESS;10454 #endif10455 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10456 if (rcStrict == VINF_VMX_VMEXIT)10457 rcStrict = VINF_SUCCESS;10458 #endif10459 /** @todo Are there any other codes that imply the event was successfully10460 * delivered to the guest? See @bugref{6607}. 
*/10461 if ( rcStrict == VINF_SUCCESS10462 || rcStrict == VINF_IEM_RAISED_XCPT)10463 TRPMResetTrap(pVCpu);10464 10465 return rcStrict;10466 #endif10467 }10468 10469 10470 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)10471 {10472 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10473 return VERR_NOT_IMPLEMENTED;10474 }10475 10476 10477 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)10478 {10479 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10480 return VERR_NOT_IMPLEMENTED;10481 }10482 52 10483 53 … … 11246 816 } 11247 817 11248 11249 /**11250 * Checks if IEM is in the process of delivering an event (interrupt or11251 * exception).11252 *11253 * @returns true if we're in the process of raising an interrupt or exception,11254 * false otherwise.11255 * @param pVCpu The cross context virtual CPU structure.11256 * @param puVector Where to store the vector associated with the11257 * currently delivered event, optional.11258 * @param pfFlags Where to store th event delivery flags (see11259 * IEM_XCPT_FLAGS_XXX), optional.11260 * @param puErr Where to store the error code associated with the11261 * event, optional.11262 * @param puCr2 Where to store the CR2 associated with the event,11263 * optional.11264 * @remarks The caller should check the flags to determine if the error code and11265 * CR2 are valid for the event.11266 */11267 VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)11268 {11269 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;11270 if (fRaisingXcpt)11271 {11272 if (puVector)11273 *puVector = pVCpu->iem.s.uCurXcpt;11274 if (pfFlags)11275 *pfFlags = pVCpu->iem.s.fCurXcpt;11276 if (puErr)11277 *puErr = pVCpu->iem.s.uCurXcptErr;11278 if (puCr2)11279 *puCr2 = pVCpu->iem.s.uCurXcptCr2;11280 }11281 return fRaisingXcpt;11282 }11283 11284 #ifdef IN_RING311285 11286 /**11287 * Handles the unlikely and probably fatal merge cases.11288 *11289 * @returns Merged status code.11290 * @param rcStrict Current EM status code.11291 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge11292 * with @a rcStrict.11293 * @param iMemMap The memory mapping index. For error reporting only.11294 * @param pVCpu The cross context virtual CPU structure of the calling11295 * thread, for error reporting only.11296 */11297 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,11298 unsigned iMemMap, PVMCPUCC pVCpu)11299 {11300 if (RT_FAILURE_NP(rcStrict))11301 return rcStrict;11302 11303 if (RT_FAILURE_NP(rcStrictCommit))11304 return rcStrictCommit;11305 11306 if (rcStrict == rcStrictCommit)11307 return rcStrictCommit;11308 11309 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",11310 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,11311 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,11312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,11313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));11314 return VERR_IOM_FF_STATUS_IPE;11315 }11316 11317 11318 /**11319 * Helper for IOMR3ProcessForceFlag.11320 *11321 * @returns Merged status code.11322 * @param rcStrict Current EM status code.11323 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge11324 * with @a rcStrict.11325 * @param iMemMap The memory mapping index. 
For error reporting only.11326 * @param pVCpu The cross context virtual CPU structure of the calling11327 * thread, for error reporting only.11328 */11329 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)11330 {11331 /* Simple. */11332 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))11333 return rcStrictCommit;11334 11335 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))11336 return rcStrict;11337 11338 /* EM scheduling status codes. */11339 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST11340 && rcStrict <= VINF_EM_LAST))11341 {11342 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST11343 && rcStrictCommit <= VINF_EM_LAST))11344 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;11345 }11346 11347 /* Unlikely */11348 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);11349 }11350 11351 11352 /**11353 * Called by force-flag handling code when VMCPU_FF_IEM is set.11354 *11355 * @returns Merge between @a rcStrict and what the commit operation returned.11356 * @param pVM The cross context VM structure.11357 * @param pVCpu The cross context virtual CPU structure of the calling EMT.11358 * @param rcStrict The status code returned by ring-0 or raw-mode.11359 */11360 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)11361 {11362 /*11363 * Reset the pending commit.11364 */11365 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)11366 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),11367 ("%#x %#x %#x\n",11368 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));11369 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);11370 11371 /*11372 * Commit the pending bounce buffers (usually just one).11373 */11374 unsigned cBufs = 0;11375 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);11376 while (iMemMap-- > 0)11377 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))11378 {11379 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);11380 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);11381 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);11382 11383 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;11384 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;11385 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];11386 11387 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)11388 {11389 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,11390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,11391 pbBuf,11392 cbFirst,11393 PGMACCESSORIGIN_IEM);11394 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);11395 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",11396 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,11397 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));11398 }11399 11400 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)11401 {11402 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,11403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,11404 pbBuf + cbFirst,11405 cbSecond,11406 PGMACCESSORIGIN_IEM);11407 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, 
iMemMap, pVCpu);
11408 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11409 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11410 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11411 }
11412 cBufs++;
11413 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11414 }
11415
11416 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11417 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11418 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11419 pVCpu->iem.s.cActiveMappings = 0;
11420 return rcStrict;
11421 }
11422
11423 #endif /* IN_RING3 */
11424
-
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllHlpFpu-x86.cpp
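The overview comment being dropped from this new FPU-helper file (see the removed lines just below) describes guest FPU instructions as being carried out by running the same or equivalent instructions on the host FPU, with the host left to prioritise the unmasked exceptions. As a rough standalone illustration of that general idea only (standard C, no VirtualBox APIs, and no claim about how the relocated helpers actually implement it):

#include <fenv.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON

int main(void)
{
    feclearexcept(FE_ALL_EXCEPT);

    volatile double rdZero = 0.0;
    volatile double rdRes  = 1.0 / rdZero;   /* the host FPU/SSE unit flags the divide error */

    /* An emulator working this way would fold flags like these back into the
       guest's FSW/MXCSR rather than printing them. */
    if (fetestexcept(FE_DIVBYZERO))
        printf("host flagged divide-by-zero, result=%f\n", rdRes);
    return 0;
}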
r108195 r108220 1 1 /* $Id$ */ 2 2 /** @file 3 * IEM - Interpreted Execution Manager - All Contexts.3 * IEM - Interpreted Execution Manager - x86 target, FPU helpers. 4 4 */ 5 5 … … 25 25 * SPDX-License-Identifier: GPL-3.0-only 26 26 */ 27 28 29 /** @page pg_iem IEM - Interpreted Execution Manager30 *31 * The interpreted exeuction manager (IEM) is for executing short guest code32 * sequences that are causing too many exits / virtualization traps. It will33 * also be used to interpret single instructions, thus replacing the selective34 * interpreters in EM and IOM.35 *36 * Design goals:37 * - Relatively small footprint, although we favour speed and correctness38 * over size.39 * - Reasonably fast.40 * - Correctly handle lock prefixed instructions.41 * - Complete instruction set - eventually.42 * - Refactorable into a recompiler, maybe.43 * - Replace EMInterpret*.44 *45 * Using the existing disassembler has been considered, however this is thought46 * to conflict with speed as the disassembler chews things a bit too much while47 * leaving us with a somewhat complicated state to interpret afterwards.48 *49 *50 * The current code is very much work in progress. You've been warned!51 *52 *53 * @section sec_iem_fpu_instr FPU Instructions54 *55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the56 * same or equivalent instructions on the host FPU. To make life easy, we also57 * let the FPU prioritize the unmasked exceptions for us. This however, only58 * works reliably when CR0.NE is set, i.e. when using \#MF instead the IRQ 1359 * for FPU exception delivery, because with CR0.NE=0 there is a window where we60 * can trigger spurious FPU exceptions.61 *62 * The guest FPU state is not loaded into the host CPU and kept there till we63 * leave IEM because the calling conventions have declared an all year open64 * season on much of the FPU state. For instance an innocent looking call to65 * memcpy might end up using a whole bunch of XMM or MM registers if the66 * particular implementation finds it worthwhile.67 *68 *69 * @section sec_iem_logging Logging70 *71 * The IEM code uses the \"IEM\" log group for the main logging. 
The different72 * logging levels/flags are generally used for the following purposes:73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.74 * - Flow (LogFlow) : Basic enter/exit IEM state info.75 * - Level 2 (Log2) : ?76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.78 * - Level 5 (Log5) : Decoding details.79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.80 * - Level 7 (Log7) : iret++ execution logging.81 * - Level 8 (Log8) :82 * - Level 9 (Log9) :83 * - Level 10 (Log10): TLBs.84 * - Level 11 (Log11): Unmasked FPU exceptions.85 *86 * The \"IEM_MEM\" log group covers most of memory related details logging,87 * except for errors and exceptions:88 * - Level 1 (Log) : Reads.89 * - Level 2 (Log2) : Read fallbacks.90 * - Level 3 (Log3) : MemMap read.91 * - Level 4 (Log4) : MemMap read fallbacks.92 * - Level 5 (Log5) : Writes93 * - Level 6 (Log6) : Write fallbacks.94 * - Level 7 (Log7) : MemMap writes and read-writes.95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.96 * - Level 9 (Log9) : Stack reads.97 * - Level 10 (Log10): Stack read fallbacks.98 * - Level 11 (Log11): Stack writes.99 * - Level 12 (Log12): Stack write fallbacks.100 * - Flow (LogFlow) :101 *102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:103 * - Level 1 (Log) : Errors and other major events.104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)105 * - Level 2 (Log2) : VM exits.106 *107 * The syscall logging level assignments:108 * - Level 1: DOS and BIOS.109 * - Level 2: Windows 3.x110 * - Level 3: Linux.111 */112 113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */114 #ifdef _MSC_VER115 # pragma warning(disable:4505)116 #endif117 27 118 28 … … 127 37 #include <VBox/vmm/iem.h> 128 38 #include <VBox/vmm/cpum.h> 129 #include <VBox/vmm/pdmapic.h>130 39 #include <VBox/vmm/pdm.h> 131 40 #include <VBox/vmm/pgm.h> 132 #include <VBox/vmm/iom.h>133 #include <VBox/vmm/em.h>134 #include <VBox/vmm/hm.h>135 #include <VBox/vmm/nem.h>136 #include <VBox/vmm/gcm.h>137 #include <VBox/vmm/gim.h>138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM139 # include <VBox/vmm/em.h>140 # include <VBox/vmm/hm_svm.h>141 #endif142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX143 # include <VBox/vmm/hmvmxinline.h>144 #endif145 41 #include <VBox/vmm/tm.h> 146 42 #include <VBox/vmm/dbgf.h> … … 150 46 #include <VBox/log.h> 151 47 #include <VBox/err.h> 152 #include <VBox/param.h>153 #include <VBox/dis.h>154 #include <iprt/asm-math.h>155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)156 # include <iprt/asm-amd64-x86.h>157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)158 # include <iprt/asm-arm.h>159 #endif160 48 #include <iprt/assert.h> 161 49 #include <iprt/string.h> … … 163 51 164 52 #include "IEMInline.h" 165 166 167 /*********************************************************************************************************************************168 * Structures and Typedefs *169 *********************************************************************************************************************************/170 /**171 * CPU exception classes.172 */173 typedef enum IEMXCPTCLASS174 {175 IEMXCPTCLASS_BENIGN,176 IEMXCPTCLASS_CONTRIBUTORY,177 IEMXCPTCLASS_PAGE_FAULT,178 IEMXCPTCLASS_DOUBLE_FAULT179 } IEMXCPTCLASS;180 181 182 /*********************************************************************************************************************************183 * Global Variables *184 
*********************************************************************************************************************************/185 #if defined(IEM_LOG_MEMORY_WRITES)186 /** What IEM just wrote. */187 uint8_t g_abIemWrote[256];188 /** How much IEM just wrote. */189 size_t g_cbIemWrote;190 #endif191 192 193 /*********************************************************************************************************************************194 * Internal Functions *195 *********************************************************************************************************************************/196 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,197 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;198 199 200 /**201 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code202 * path.203 *204 * This will also invalidate TLB entries for any pages with active data205 * breakpoints on them.206 *207 * @returns IEM_F_BRK_PENDING_XXX or zero.208 * @param pVCpu The cross context virtual CPU structure of the209 * calling thread.210 *211 * @note Don't call directly, use iemCalcExecDbgFlags instead.212 */213 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)214 {215 uint32_t fExec = 0;216 217 /*218 * Helper for invalidate the data TLB for breakpoint addresses.219 *220 * This is to make sure any access to the page will always trigger a TLB221 * load for as long as the breakpoint is enabled.222 */223 #ifdef IEM_WITH_DATA_TLB224 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \225 RTGCPTR uTagNoRev = (a_uValue); \226 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \227 /** @todo do large page accounting */ \228 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \229 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \230 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \231 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \232 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \233 } while (0)234 #else235 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)236 #endif237 238 /*239 * Process guest breakpoints.240 */241 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \242 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \243 { \244 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \245 { \246 case X86_DR7_RW_EO: \247 fExec |= IEM_F_PENDING_BRK_INSTR; \248 break; \249 case X86_DR7_RW_WO: \250 case X86_DR7_RW_RW: \251 fExec |= IEM_F_PENDING_BRK_DATA; \252 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \253 break; \254 case X86_DR7_RW_IO: \255 fExec |= IEM_F_PENDING_BRK_X86_IO; \256 break; \257 } \258 } \259 } while (0)260 261 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];262 if (fGstDr7 & X86_DR7_ENABLED_MASK)263 {264 /** @todo extract more details here to simplify matching later. */265 #ifdef IEM_WITH_DATA_TLB266 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);267 #endif268 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);269 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);270 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);271 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);272 }273 274 /*275 * Process hypervisor breakpoints.276 */277 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);278 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);279 if (fHyperDr7 & X86_DR7_ENABLED_MASK)280 {281 /** @todo extract more details here to simplify matching later. 
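Before the same decoding is repeated for the hypervisor registers below, it may help to spell out what PROCESS_ONE_BP extracts from DR7 for each slot (standard x86 layout: per-slot L/G enable bits in the low byte, a 2-bit R/W type field starting at bit 16); the names here are illustrative, not the VMM macros:

    #include <cstdint>

    enum class BpKind { None, Instr, Data, Io };

    // Decode one of the four DR7 breakpoint slots (iBp = 0..3).
    static BpKind decodeDr7Slot(uint64_t fDr7, unsigned iBp)
    {
        if (!(fDr7 & (UINT64_C(3) << (iBp * 2))))   // L<n>/G<n> enable bits
            return BpKind::None;
        switch ((fDr7 >> (16 + iBp * 4)) & 3)       // R/W field for this slot
        {
            case 0:  return BpKind::Instr;          // execute-only        -> IEM_F_PENDING_BRK_INSTR
            case 2:  return BpKind::Io;             // I/O (needs CR4.DE)  -> IEM_F_PENDING_BRK_X86_IO
            default: return BpKind::Data;           // write or read/write -> IEM_F_PENDING_BRK_DATA
        }
    }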
*/282 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));283 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));284 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));285 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));286 }287 288 return fExec;289 }290 291 292 /**293 * Initializes the decoder state.294 *295 * iemReInitDecoder is mostly a copy of this function.296 *297 * @param pVCpu The cross context virtual CPU structure of the298 * calling thread.299 * @param fExecOpts Optional execution flags:300 * - IEM_F_BYPASS_HANDLERS301 * - IEM_F_X86_DISREGARD_LOCK302 */303 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)304 {305 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);306 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));315 316 /* Execution state: */317 uint32_t fExec;318 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;319 320 /* Decoder state: */321 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */322 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;323 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)324 {325 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... 
*/326 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;327 }328 else329 {330 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;331 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;332 }333 pVCpu->iem.s.fPrefixes = 0;334 pVCpu->iem.s.uRexReg = 0;335 pVCpu->iem.s.uRexB = 0;336 pVCpu->iem.s.uRexIndex = 0;337 pVCpu->iem.s.idxPrefix = 0;338 pVCpu->iem.s.uVex3rdReg = 0;339 pVCpu->iem.s.uVexLength = 0;340 pVCpu->iem.s.fEvexStuff = 0;341 pVCpu->iem.s.iEffSeg = X86_SREG_DS;342 #ifdef IEM_WITH_CODE_TLB343 pVCpu->iem.s.pbInstrBuf = NULL;344 pVCpu->iem.s.offInstrNextByte = 0;345 pVCpu->iem.s.offCurInstrStart = 0;346 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF347 pVCpu->iem.s.offOpcode = 0;348 # endif349 # ifdef VBOX_STRICT350 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;351 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;352 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;353 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);354 # endif355 #else356 pVCpu->iem.s.offOpcode = 0;357 pVCpu->iem.s.cbOpcode = 0;358 #endif359 pVCpu->iem.s.offModRm = 0;360 pVCpu->iem.s.cActiveMappings = 0;361 pVCpu->iem.s.iNextMapping = 0;362 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;363 364 #ifdef DBGFTRACE_ENABLED365 switch (IEM_GET_CPU_MODE(pVCpu))366 {367 case IEMMODE_64BIT:368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);369 break;370 case IEMMODE_32BIT:371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);372 break;373 case IEMMODE_16BIT:374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);375 break;376 }377 #endif378 }379 380 381 /**382 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.383 *384 * This is mostly a copy of iemInitDecoder.385 *386 * @param pVCpu The cross context virtual CPU structure of the calling EMT.387 */388 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)389 {390 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));399 400 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */401 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),402 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));403 404 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);405 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */406 pVCpu->iem.s.enmEffAddrMode = enmMode;407 if (enmMode != IEMMODE_64BIT)408 {409 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... 
*/410 pVCpu->iem.s.enmEffOpSize = enmMode;411 }412 else413 {414 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;415 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;416 }417 pVCpu->iem.s.fPrefixes = 0;418 pVCpu->iem.s.uRexReg = 0;419 pVCpu->iem.s.uRexB = 0;420 pVCpu->iem.s.uRexIndex = 0;421 pVCpu->iem.s.idxPrefix = 0;422 pVCpu->iem.s.uVex3rdReg = 0;423 pVCpu->iem.s.uVexLength = 0;424 pVCpu->iem.s.fEvexStuff = 0;425 pVCpu->iem.s.iEffSeg = X86_SREG_DS;426 #ifdef IEM_WITH_CODE_TLB427 if (pVCpu->iem.s.pbInstrBuf)428 {429 uint64_t off = (enmMode == IEMMODE_64BIT430 ? pVCpu->cpum.GstCtx.rip431 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)432 - pVCpu->iem.s.uInstrBufPc;433 if (off < pVCpu->iem.s.cbInstrBufTotal)434 {435 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;436 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;437 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)438 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;439 else440 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;441 }442 else443 {444 pVCpu->iem.s.pbInstrBuf = NULL;445 pVCpu->iem.s.offInstrNextByte = 0;446 pVCpu->iem.s.offCurInstrStart = 0;447 pVCpu->iem.s.cbInstrBuf = 0;448 pVCpu->iem.s.cbInstrBufTotal = 0;449 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;450 }451 }452 else453 {454 pVCpu->iem.s.offInstrNextByte = 0;455 pVCpu->iem.s.offCurInstrStart = 0;456 pVCpu->iem.s.cbInstrBuf = 0;457 pVCpu->iem.s.cbInstrBufTotal = 0;458 # ifdef VBOX_STRICT459 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;460 # endif461 }462 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF463 pVCpu->iem.s.offOpcode = 0;464 # endif465 #else /* !IEM_WITH_CODE_TLB */466 pVCpu->iem.s.cbOpcode = 0;467 pVCpu->iem.s.offOpcode = 0;468 #endif /* !IEM_WITH_CODE_TLB */469 pVCpu->iem.s.offModRm = 0;470 Assert(pVCpu->iem.s.cActiveMappings == 0);471 pVCpu->iem.s.iNextMapping = 0;472 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);473 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));474 475 #ifdef DBGFTRACE_ENABLED476 switch (enmMode)477 {478 case IEMMODE_64BIT:479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);480 break;481 case IEMMODE_32BIT:482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);483 break;484 case IEMMODE_16BIT:485 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);486 break;487 }488 #endif489 }490 491 492 493 /**494 * Prefetch opcodes the first time when starting executing.495 *496 * @returns Strict VBox status code.497 * @param pVCpu The cross context virtual CPU structure of the498 * calling thread.499 * @param fExecOpts Optional execution flags:500 * - IEM_F_BYPASS_HANDLERS501 * - IEM_F_X86_DISREGARD_LOCK502 */503 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT504 {505 iemInitDecoder(pVCpu, fExecOpts);506 507 #ifndef IEM_WITH_CODE_TLB508 /*509 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.510 *511 * First translate CS:rIP to a physical address.512 *513 * Note! 
The iemOpcodeFetchMoreBytes code depends on this here code to fetch514 * all relevant bytes from the first page, as it ASSUMES it's only ever515 * called for dealing with CS.LIM, page crossing and instructions that516 * are too long.517 */518 uint32_t cbToTryRead;519 RTGCPTR GCPtrPC;520 if (IEM_IS_64BIT_CODE(pVCpu))521 {522 cbToTryRead = GUEST_PAGE_SIZE;523 GCPtrPC = pVCpu->cpum.GstCtx.rip;524 if (IEM_IS_CANONICAL(GCPtrPC))525 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);526 else527 return iemRaiseGeneralProtectionFault0(pVCpu);528 }529 else530 {531 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;532 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));533 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)534 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;535 else536 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);537 if (cbToTryRead) { /* likely */ }538 else /* overflowed */539 {540 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);541 cbToTryRead = UINT32_MAX;542 }543 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;544 Assert(GCPtrPC <= UINT32_MAX);545 }546 547 PGMPTWALKFAST WalkFast;548 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,549 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,550 &WalkFast);551 if (RT_SUCCESS(rc))552 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);553 else554 {555 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));556 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT557 /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't558 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */559 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)560 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);561 # endif562 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);563 }564 #if 0565 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }566 else567 {568 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));569 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT570 /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/571 # error completely wrong572 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)573 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);574 # endif575 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);576 }577 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }578 else579 {580 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));581 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT582 /** @todo this is completely wrong for EPT. 
WalkFast.fFailed is always zero here!*/583 # error completely wrong.584 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)585 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);586 # endif587 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);588 }589 #else590 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);591 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));592 #endif593 RTGCPHYS const GCPhys = WalkFast.GCPhys;594 595 /*596 * Read the bytes at this address.597 */598 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);599 if (cbToTryRead > cbLeftOnPage)600 cbToTryRead = cbLeftOnPage;601 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))602 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);603 604 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))605 {606 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))608 { /* likely */ }609 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))610 {611 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",612 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));613 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);614 }615 else616 {617 Log((RT_SUCCESS(rcStrict)618 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"619 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",620 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));621 return rcStrict;622 }623 }624 else625 {626 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);627 if (RT_SUCCESS(rc))628 { /* likely */ }629 else630 {631 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",632 GCPtrPC, GCPhys, rc, cbToTryRead));633 return rc;634 }635 }636 pVCpu->iem.s.cbOpcode = cbToTryRead;637 #endif /* !IEM_WITH_CODE_TLB */638 return VINF_SUCCESS;639 }640 641 642 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)643 /**644 * Helper for doing large page accounting at TLB load time.645 */646 template<bool const a_fGlobal>647 DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)648 {649 if (a_fGlobal)650 pTlb->cTlbGlobalLargePageCurLoads++;651 else652 pTlb->cTlbNonGlobalLargePageCurLoads++;653 654 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP655 RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;656 ASMBitSet(pTlb->bmLargePage, idxBit);657 # endif658 659 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);660 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;661 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal662 ? 
&pTlb->GlobalLargePageRange663 : &pTlb->NonGlobalLargePageRange;664 uTagNoRev &= ~(RTGCPTR)fMask;665 if (uTagNoRev < pRange->uFirstTag)666 pRange->uFirstTag = uTagNoRev;667 668 uTagNoRev |= fMask;669 if (uTagNoRev > pRange->uLastTag)670 pRange->uLastTag = uTagNoRev;671 672 RT_NOREF_PV(pVCpu);673 }674 #endif675 676 677 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)678 /**679 * Worker for iemTlbInvalidateAll.680 */681 template<bool a_fGlobal>682 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)683 {684 if (!a_fGlobal)685 pTlb->cTlsFlushes++;686 else687 pTlb->cTlsGlobalFlushes++;688 689 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;690 if (RT_LIKELY(pTlb->uTlbRevision != 0))691 { /* very likely */ }692 else693 {694 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;695 pTlb->cTlbRevisionRollovers++;696 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;697 while (i-- > 0)698 pTlb->aEntries[i * 2].uTag = 0;699 }700 701 pTlb->cTlbNonGlobalLargePageCurLoads = 0;702 pTlb->NonGlobalLargePageRange.uLastTag = 0;703 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;704 705 if (a_fGlobal)706 {707 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;708 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))709 { /* very likely */ }710 else711 {712 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;713 pTlb->cTlbRevisionRollovers++;714 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;715 while (i-- > 0)716 pTlb->aEntries[i * 2 + 1].uTag = 0;717 }718 719 pTlb->cTlbGlobalLargePageCurLoads = 0;720 pTlb->GlobalLargePageRange.uLastTag = 0;721 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;722 }723 }724 #endif725 726 727 /**728 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.729 */730 template<bool a_fGlobal>731 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)732 {733 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)734 Log10(("IEMTlbInvalidateAll\n"));735 736 # ifdef IEM_WITH_CODE_TLB737 pVCpu->iem.s.cbInstrBufTotal = 0;738 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);739 if (a_fGlobal)740 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);741 else742 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);743 # endif744 745 # ifdef IEM_WITH_DATA_TLB746 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);747 if (a_fGlobal)748 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);749 else750 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);751 # endif752 #else753 RT_NOREF(pVCpu);754 #endif755 }756 757 758 /**759 * Invalidates non-global the IEM TLB entries.760 *761 * This is called internally as well as by PGM when moving GC mappings.762 *763 * @param pVCpu The cross context virtual CPU structure of the calling764 * thread.765 */766 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)767 {768 iemTlbInvalidateAll<false>(pVCpu);769 }770 771 772 /**773 * Invalidates all the IEM TLB entries.774 *775 * This is called internally as well as by PGM when moving GC mappings.776 *777 * @param pVCpu The cross context virtual CPU structure of the calling778 * thread.779 */780 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)781 {782 iemTlbInvalidateAll<true>(pVCpu);783 }784 785 786 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)787 788 /** @todo graduate this to cdefs.h or asm-mem.h. 
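Stepping back from the cache-line details that follow: the flush logic in iemTlbInvalidateOne above never touches the individual entries on the fast path, it only bumps a revision that is OR'ed into every tag, so stale entries simply stop matching. A toy model of that idea (field names and the revision split are simplified assumptions):

    #include <cstdint>

    struct ToyTlb
    {
        static constexpr uint64_t kRevIncr = UINT64_C(1) << 36; // assumed: the tag lives in the low 36 bits
        uint64_t uRevision = kRevIncr;
        uint64_t aTags[256] = {};

        void insert(unsigned idx, uint64_t uTagNoRev)        { aTags[idx] = uTagNoRev | uRevision; }
        bool matches(unsigned idx, uint64_t uTagNoRev) const { return aTags[idx] == (uTagNoRev | uRevision); }

        void flushAll()
        {
            uRevision += kRevIncr;      // every previously inserted tag now mismatches
            if (uRevision == 0)         // rare rollover: fall back to clearing the tags
            {
                uRevision = kRevIncr;
                for (uint64_t &uTag : aTags)
                    uTag = 0;
            }
        }
    };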
*/789 # ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */790 # undef RT_CACHELINE_SIZE791 # define RT_CACHELINE_SIZE 128792 # endif793 794 # if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))795 # define MY_PREFETCH(a_pvAddr) _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)796 # elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))797 # define MY_PREFETCH(a_pvAddr) __prefetch((a_pvAddr))798 # elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)799 # define MY_PREFETCH(a_pvAddr) __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)800 # else801 # define MY_PREFETCH(a_pvAddr) ((void)0)802 # endif803 # if 0804 # undef MY_PREFETCH805 # define MY_PREFETCH(a_pvAddr) ((void)0)806 # endif807 808 /** @def MY_PREFETCH_64809 * 64 byte prefetch hint, could be more depending on cache line size. */810 /** @def MY_PREFETCH_128811 * 128 byte prefetch hint. */812 /** @def MY_PREFETCH_256813 * 256 byte prefetch hint. */814 # if RT_CACHELINE_SIZE >= 128815 /* 128 byte cache lines */816 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)817 # define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)818 # define MY_PREFETCH_256(a_pvAddr) do { \819 MY_PREFETCH(a_pvAddr); \820 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \821 } while (0)822 # else823 /* 64 byte cache lines */824 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)825 # define MY_PREFETCH_128(a_pvAddr) do { \826 MY_PREFETCH(a_pvAddr); \827 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \828 } while (0)829 # define MY_PREFETCH_256(a_pvAddr) do { \830 MY_PREFETCH(a_pvAddr); \831 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \832 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \833 MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \834 } while (0)835 # endif836 837 template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>838 DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,839 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT840 {841 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);842 AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */843 844 if (a_fGlobal)845 pTlb->cTlbInvlPgLargeGlobal += 1;846 if (a_fNonGlobal)847 pTlb->cTlbInvlPgLargeNonGlobal += 1;848 849 /*850 * Set up the scan.851 *852 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256 TLB will map853 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Globl]854 * values are for the range 0-1MB, or slots 0-256. So, we construct a mask855 * that fold large page offsets 1MB-2MB into the 0-1MB range.856 *857 * For our example with 2MB pages and a 256 entry TLB: 0xfffffffffffffeff858 *859 * MY_PREFETCH: Hope that prefetching 256 bytes at the time is okay for860 * relevant host architectures.861 */862 /** @todo benchmark this code from the guest side. */863 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);864 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP865 uintptr_t idxBitmap = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;866 uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64867 : IEMTLB_ENTRY_COUNT * 2 / 64;868 #else869 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;870 MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);871 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 
512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;872 #endif873 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0874 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)875 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));876 877 /*878 * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.879 * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.880 */881 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);882 if ( !a_fDataTlb883 && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))884 pVCpu->iem.s.cbInstrBufTotal = 0;885 886 /*887 * Combine TAG values with the TLB revisions.888 */889 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;890 if (a_fNonGlobal)891 GCPtrTag |= pTlb->uTlbRevision;892 893 /*894 * Do the scanning.895 */896 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP897 uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX898 : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);899 /* Scan bitmap entries (64 bits at the time): */900 for (;;)901 {902 # if 1903 uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;904 if (bmEntry)905 {906 /* Scan the non-zero 64-bit value in groups of 8 bits: */907 uint64_t bmToClear = 0;908 uintptr_t idxEven = idxBitmap * 64;909 uint32_t idxTag = 0;910 for (;;)911 {912 if (bmEntry & 0xff)913 {914 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \915 if (a_fNonGlobal) \916 { \917 if (bmEntry & a_bmNonGlobal) \918 { \919 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \920 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \921 { \922 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \923 pTlb->aEntries[a_idxEvenIter].GCPhys, \924 a_idxEvenIter, a_fDataTlb); \925 pTlb->aEntries[a_idxEvenIter].uTag = 0; \926 bmToClearSub8 |= a_bmNonGlobal; \927 } \928 } \929 else \930 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\931 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \932 != (GCPtrTag & IEMTLB_REVISION_MASK)); \933 } \934 if (a_fGlobal) \935 { \936 if (bmEntry & a_bmGlobal) \937 { \938 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \939 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \940 { \941 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \942 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \943 a_idxEvenIter + 1, a_fDataTlb); \944 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \945 bmToClearSub8 |= a_bmGlobal; \946 } \947 } \948 else \949 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\950 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \951 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \952 }953 uint64_t bmToClearSub8 = 0;954 ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)955 ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)956 ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)957 ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)958 bmToClear |= bmToClearSub8 << (idxTag * 2);959 # undef ONE_PAIR960 }961 962 /* advance to the next 8 bits. */963 bmEntry >>= 8;964 if (!bmEntry)965 break;966 idxEven += 8;967 idxTag += 4;968 }969 970 /* Clear the large page flags we covered. 
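As a quick sanity check of the GCPtrTagMask quoted in the setup comment above: with 2MB large pages (512 consecutive 4K tags) and 256 tag slots (an assumption for this example), tag t and tag t+256 share a slot pair, so only bit 8 of the iterated tag has to be ignored when comparing:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        constexpr uint64_t fLargeLowMask = (UINT64_C(1) << 9) - 1;   // 512 4K pages per 2MB page
        constexpr uint64_t fSlotMask     = (UINT64_C(1) << 8) - 1;   // 256 tag slots assumed
        constexpr uint64_t fTagMask      = ~(fLargeLowMask & ~fSlotMask);   // drops bit 8 only
        assert(fTagMask == UINT64_C(0xfffffffffffffeff));            // value quoted in the comment above
        return 0;
    }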
*/971 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;972 }973 # else974 uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;975 if (bmEntry)976 {977 /* Scan the non-zero 64-bit value completely unrolled: */978 uintptr_t const idxEven = idxBitmap * 64;979 uint64_t bmToClear = 0;980 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \981 if (a_fNonGlobal) \982 { \983 if (bmEntry & a_bmNonGlobal) \984 { \985 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \986 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \987 { \988 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \989 pTlb->aEntries[a_idxEvenIter].GCPhys, \990 a_idxEvenIter, a_fDataTlb); \991 pTlb->aEntries[a_idxEvenIter].uTag = 0; \992 bmToClear |= a_bmNonGlobal; \993 } \994 } \995 else \996 Assert( !(pTlb->aEntriqes[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\997 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \998 != (GCPtrTag & IEMTLB_REVISION_MASK)); \999 } \1000 if (a_fGlobal) \1001 { \1002 if (bmEntry & a_bmGlobal) \1003 { \1004 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \1005 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \1006 { \1007 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \1008 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1009 a_idxEvenIter + 1, a_fDataTlb); \1010 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1011 bmToClear |= a_bmGlobal; \1012 } \1013 } \1014 else \1015 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\1016 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \1017 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \1018 } ((void)0)1019 # define FOUR_PAIRS(a_iByte, a_cShift) \1020 ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \1021 ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \1022 ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \1023 ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)1024 if (bmEntry & (uint32_t)UINT16_MAX)1025 {1026 FOUR_PAIRS(0, 0);1027 FOUR_PAIRS(1, 8);1028 }1029 if (bmEntry & ((uint32_t)UINT16_MAX << 16))1030 {1031 FOUR_PAIRS(2, 16);1032 FOUR_PAIRS(3, 24);1033 }1034 if (bmEntry & ((uint64_t)UINT16_MAX << 32))1035 {1036 FOUR_PAIRS(4, 32);1037 FOUR_PAIRS(5, 40);1038 }1039 if (bmEntry & ((uint64_t)UINT16_MAX << 16))1040 {1041 FOUR_PAIRS(6, 48);1042 FOUR_PAIRS(7, 56);1043 }1044 # undef FOUR_PAIRS1045 1046 /* Clear the large page flags we covered. 
*/1047 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;1048 }1049 # endif1050 1051 /* advance */1052 idxBitmap++;1053 if (idxBitmap >= idxBitmapEnd)1054 break;1055 if (a_fNonGlobal)1056 GCPtrTag += 32;1057 if (a_fGlobal)1058 GCPtrTagGlob += 32;1059 }1060 1061 #else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1062 1063 for (; idxEven < idxEvenEnd; idxEven += 8)1064 {1065 # define ONE_ITERATION(a_idxEvenIter) \1066 if (a_fNonGlobal) \1067 { \1068 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \1069 { \1070 if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1071 { \1072 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \1073 a_idxEvenIter, a_fDataTlb); \1074 pTlb->aEntries[a_idxEvenIter].uTag = 0; \1075 } \1076 } \1077 GCPtrTag++; \1078 } \1079 \1080 if (a_fGlobal) \1081 { \1082 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \1083 { \1084 if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1085 { \1086 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1087 a_idxEvenIter + 1, a_fDataTlb); \1088 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1089 } \1090 } \1091 GCPtrTagGlob++; \1092 }1093 if (idxEven < idxEvenEnd - 4)1094 MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);1095 ONE_ITERATION(idxEven)1096 ONE_ITERATION(idxEven + 2)1097 ONE_ITERATION(idxEven + 4)1098 ONE_ITERATION(idxEven + 6)1099 # undef ONE_ITERATION1100 }1101 #endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1102 }1103 1104 template<bool const a_fDataTlb, bool const a_f2MbLargePage>1105 DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,1106 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT1107 {1108 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);1109 1110 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);1111 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag1112 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)1113 {1114 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1115 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1116 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1117 else1118 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1119 }1120 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1121 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1122 {1123 /* Large pages aren't as likely in the non-global TLB half. 
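The skip taken in this branch works because every large-page TLB load also records the lowest and highest tag seen for its half (global vs. non-global); if the aligned tag being flushed lies outside that recorded window, the expensive scan cannot find anything. A reduced sketch with illustrative names:

    #include <algorithm>
    #include <cstdint>

    struct LargePageTagRange
    {
        uint64_t uFirstTag = UINT64_MAX;    // empty range: first > last
        uint64_t uLastTag  = 0;

        void noteLoad(uint64_t uTagLo, uint64_t uTagHi)   // called when a large page is loaded
        {
            uFirstTag = std::min(uFirstTag, uTagLo);
            uLastTag  = std::max(uLastTag,  uTagHi);
        }
        bool mayContain(uint64_t uAlignedTag) const       // false => the scan can be skipped
        {
            return uAlignedTag >= uFirstTag && uAlignedTag <= uLastTag;
        }
    };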
*/1124 IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);1125 }1126 else1127 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1128 }1129 1130 template<bool const a_fDataTlb>1131 DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT1132 {1133 pTlb->cTlbInvlPg += 1;1134 1135 /*1136 * Flush the entry pair.1137 */1138 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))1139 {1140 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);1141 pTlb->aEntries[idxEven].uTag = 0;1142 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))1143 pVCpu->iem.s.cbInstrBufTotal = 0;1144 }1145 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))1146 {1147 IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);1148 pTlb->aEntries[idxEven + 1].uTag = 0;1149 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))1150 pVCpu->iem.s.cbInstrBufTotal = 0;1151 }1152 1153 /*1154 * If there are (or has been) large pages in the TLB, we must check if the1155 * address being flushed may involve one of those, as then we'd have to1156 * scan for entries relating to the same page and flush those as well.1157 */1158 # if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */1159 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)1160 # else1161 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)1162 # endif1163 {1164 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);1165 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)1166 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1167 else1168 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1169 }1170 }1171 1172 #endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */1173 1174 /**1175 * Invalidates a page in the TLBs.1176 *1177 * @param pVCpu The cross context virtual CPU structure of the calling1178 * thread.1179 * @param GCPtr The address of the page to invalidate1180 * @thread EMT(pVCpu)1181 */1182 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)1183 {1184 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);1185 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1186 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));1187 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);1188 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));1189 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);1190 1191 # ifdef IEM_WITH_CODE_TLB1192 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);1193 # endif1194 # ifdef IEM_WITH_DATA_TLB1195 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);1196 # endif1197 #else1198 NOREF(pVCpu); NOREF(GCPtr);1199 #endif1200 }1201 1202 1203 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1204 /**1205 * Invalid both TLBs slow fashion following a rollover.1206 *1207 * Worker for IEMTlbInvalidateAllPhysical,1208 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,1209 * iemMemMapJmp and others.1210 *1211 * @thread EMT(pVCpu)1212 */1213 static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)1214 {1215 
Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));1216 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1217 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1218 1219 unsigned i;1220 # ifdef IEM_WITH_CODE_TLB1221 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);1222 while (i-- > 0)1223 {1224 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;1225 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1226 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1227 }1228 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;1229 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1230 # endif1231 # ifdef IEM_WITH_DATA_TLB1232 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);1233 while (i-- > 0)1234 {1235 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;1236 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1237 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1238 }1239 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;1240 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1241 # endif1242 1243 }1244 #endif1245 1246 1247 /**1248 * Invalidates the host physical aspects of the IEM TLBs.1249 *1250 * This is called internally as well as by PGM when moving GC mappings.1251 *1252 * @param pVCpu The cross context virtual CPU structure of the calling1253 * thread.1254 * @note Currently not used.1255 */1256 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)1257 {1258 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1259 /* Note! This probably won't end up looking exactly like this, but it give an idea... */1260 Log10(("IEMTlbInvalidateAllPhysical\n"));1261 1262 # ifdef IEM_WITH_CODE_TLB1263 pVCpu->iem.s.cbInstrBufTotal = 0;1264 # endif1265 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;1266 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))1267 {1268 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;1269 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1270 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;1271 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1272 }1273 else1274 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1275 #else1276 NOREF(pVCpu);1277 #endif1278 }1279 1280 1281 /**1282 * Invalidates the host physical aspects of the IEM TLBs.1283 *1284 * This is called internally as well as by PGM when moving GC mappings.1285 *1286 * @param pVM The cross context VM structure.1287 * @param idCpuCaller The ID of the calling EMT if available to the caller,1288 * otherwise NIL_VMCPUID.1289 * @param enmReason The reason we're called.1290 *1291 * @remarks Caller holds the PGM lock.1292 */1293 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)1294 {1295 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1296 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? 
VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);1297 if (pVCpuCaller)1298 VMCPU_ASSERT_EMT(pVCpuCaller);1299 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);1300 1301 VMCC_FOR_EACH_VMCPU(pVM)1302 {1303 # ifdef IEM_WITH_CODE_TLB1304 if (pVCpuCaller == pVCpu)1305 pVCpu->iem.s.cbInstrBufTotal = 0;1306 # endif1307 1308 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);1309 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;1310 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))1311 { /* likely */}1312 else if (pVCpuCaller != pVCpu)1313 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;1314 else1315 {1316 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1317 continue;1318 }1319 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1320 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1321 1322 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1323 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1324 }1325 VMCC_FOR_EACH_VMCPU_END(pVM);1326 1327 #else1328 RT_NOREF(pVM, idCpuCaller, enmReason);1329 #endif1330 }1331 1332 1333 /**1334 * Flushes the prefetch buffer, light version.1335 */1336 void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)1337 {1338 #ifndef IEM_WITH_CODE_TLB1339 pVCpu->iem.s.cbOpcode = cbInstr;1340 #else1341 RT_NOREF(pVCpu, cbInstr);1342 #endif1343 }1344 1345 1346 /**1347 * Flushes the prefetch buffer, heavy version.1348 */1349 void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)1350 {1351 #ifndef IEM_WITH_CODE_TLB1352 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */1353 #elif 11354 pVCpu->iem.s.cbInstrBufTotal = 0;1355 RT_NOREF(cbInstr);1356 #else1357 RT_NOREF(pVCpu, cbInstr);1358 #endif1359 }1360 1361 1362 1363 #ifdef IEM_WITH_CODE_TLB1364 1365 /**1366 * Tries to fetches @a cbDst opcode bytes, raise the appropriate exception on1367 * failure and jumps.1368 *1369 * We end up here for a number of reasons:1370 * - pbInstrBuf isn't yet initialized.1371 * - Advancing beyond the buffer boundrary (e.g. cross page).1372 * - Advancing beyond the CS segment limit.1373 * - Fetching from non-mappable page (e.g. MMIO).1374 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).1375 *1376 * @param pVCpu The cross context virtual CPU structure of the1377 * calling thread.1378 * @param pvDst Where to return the bytes.1379 * @param cbDst Number of bytes to read. A value of zero is1380 * allowed for initializing pbInstrBuf (the1381 * recompiler does this). In this case it is best1382 * to set pbInstrBuf to NULL prior to the call.1383 */1384 void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP1385 {1386 # ifdef IN_RING31387 for (;;)1388 {1389 Assert(cbDst <= 8);1390 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;1391 1392 /*1393 * We might have a partial buffer match, deal with that first to make the1394 * rest simpler. 
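In other words, the first step below just drains what is left of the current buffer and leaves the remainder to the TLB-backed path; a simplified stand-alone version (generic names, not the IEM state fields):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Returns how many bytes still have to come from the next page mapping.
    static size_t consumeBufferedOpcodes(const uint8_t *pbBuf, size_t cbBuf, size_t offNext,
                                         uint8_t *pbDst, size_t cbDst)
    {
        if (pbBuf && offNext < cbBuf)
        {
            size_t const cbCopy = std::min(cbBuf - offNext, cbDst);   // tail of the old buffer
            memcpy(pbDst, &pbBuf[offNext], cbCopy);
            return cbDst - cbCopy;                                    // the rest goes through the TLB
        }
        return cbDst;                                                 // nothing buffered yet
    }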
This is the first part of the cross page/buffer case.1395 */1396 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;1397 if (pbInstrBuf != NULL)1398 {1399 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */1400 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;1401 if (offBuf < cbInstrBuf)1402 {1403 Assert(offBuf + cbDst > cbInstrBuf);1404 uint32_t const cbCopy = cbInstrBuf - offBuf;1405 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);1406 1407 cbDst -= cbCopy;1408 pvDst = (uint8_t *)pvDst + cbCopy;1409 offBuf += cbCopy;1410 }1411 }1412 1413 /*1414 * Check segment limit, figuring how much we're allowed to access at this point.1415 *1416 * We will fault immediately if RIP is past the segment limit / in non-canonical1417 * territory. If we do continue, there are one or more bytes to read before we1418 * end up in trouble and we need to do that first before faulting.1419 */1420 RTGCPTR GCPtrFirst;1421 uint32_t cbMaxRead;1422 if (IEM_IS_64BIT_CODE(pVCpu))1423 {1424 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1425 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))1426 { /* likely */ }1427 else1428 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1429 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1430 }1431 else1432 {1433 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1434 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1435 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))1436 { /* likely */ }1437 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */1438 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1439 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;1440 if (cbMaxRead != 0)1441 { /* likely */ }1442 else1443 {1444 /* Overflowed because address is 0 and limit is max. */1445 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1446 cbMaxRead = X86_PAGE_SIZE;1447 }1448 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;1449 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1450 if (cbMaxRead2 < cbMaxRead)1451 cbMaxRead = cbMaxRead2;1452 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */1453 }1454 1455 /*1456 * Get the TLB entry for this piece of code.1457 */1458 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);1459 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);1460 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)1461 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))1462 {1463 /* likely when executing lots of code, otherwise unlikely */1464 # ifdef IEM_WITH_TLB_STATISTICS1465 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;1466 # endif1467 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1468 1469 /* Check TLB page table level access flags. 
*/1470 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))1471 {1472 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)1473 {1474 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));1475 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1476 }1477 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))1478 {1479 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));1480 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);1481 }1482 }1483 1484 /* Look up the physical page info if necessary. */1485 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)1486 { /* not necessary */ }1487 else1488 {1489 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))1490 { /* likely */ }1491 else1492 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1493 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;1494 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,1495 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);1496 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));1497 }1498 }1499 else1500 {1501 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;1502 1503 /* This page table walking will set A bits as required by the access while performing the walk.1504 ASSUMES these are set when the address is translated rather than on commit... */1505 /** @todo testcase: check when A bits are actually set by the CPU for code. */1506 PGMPTWALKFAST WalkFast;1507 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,1508 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,1509 &WalkFast);1510 if (RT_SUCCESS(rc))1511 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);1512 else1513 {1514 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT1515 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */1516 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));1517 # endif1518 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));1519 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);1520 }1521 1522 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);1523 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)1524 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. 
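What gets decided here is which half of the slot pair receives the translation: even slots are stamped with the non-global revision and die on every flush, odd slots are stamped with the global revision and survive non-global flushes, which is why only PTE.G=1 pages (and only in ring-0) go into the odd half. A compressed illustration with made-up field names:

    #include <cstdint>

    struct ToyPairedTlb
    {
        uint64_t uRev       = UINT64_C(1) << 36;    // non-global revision (even slots); the revision
        uint64_t uRevGlobal = UINT64_C(1) << 36;    // bits are assumed never to overlap the tag bits
        uint64_t aTags[2 * 256] = {};               // 256 slot pairs: even = non-global, odd = global

        void load(unsigned iSlot, uint64_t uTagNoRev, bool fGlobalPage)
        {
            aTags[iSlot * 2 + (fGlobalPage ? 1 : 0)] = uTagNoRev | (fGlobalPage ? uRevGlobal : uRev);
        }
        bool lookup(unsigned iSlot, uint64_t uTagNoRev) const
        {
            return aTags[iSlot * 2]     == (uTagNoRev | uRev)          // non-global hit
                || aTags[iSlot * 2 + 1] == (uTagNoRev | uRevGlobal);   // global hit
        }
    };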
*/1525 {1526 pTlbe--;1527 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;1528 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1529 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1530 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1531 else1532 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));1533 # endif1534 }1535 else1536 {1537 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;1538 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;1539 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1540 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1541 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1542 else1543 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);1544 # endif1545 }1546 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))1547 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/1548 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);1549 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;1550 pTlbe->GCPhys = GCPhysPg;1551 pTlbe->pbMappingR3 = NULL;1552 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1553 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);1554 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1555 1556 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))1557 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1558 else1559 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1560 1561 /* Resolve the physical address. */1562 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))1563 { /* likely */ }1564 else1565 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1566 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));1567 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,1568 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);1569 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));1570 }1571 1572 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */1573 /*1574 * Try do a direct read using the pbMappingR3 pointer.1575 * Note! Do not recheck the physical TLB revision number here as we have the1576 * wrong response to changes in the else case. 
If someone is updating1577 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine1578 * pretending we always won the race.1579 */1580 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))1581 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)1582 {1583 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1584 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;1585 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)1586 {1587 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);1588 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;1589 }1590 else1591 {1592 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1593 if (cbInstr + (uint32_t)cbDst <= 15)1594 {1595 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;1596 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1597 }1598 else1599 {1600 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1601 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1602 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1603 }1604 }1605 if (cbDst <= cbMaxRead)1606 {1607 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */1608 # if 0 /* unused */1609 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1610 # endif1611 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;1612 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1613 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1614 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;1615 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */1616 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);1617 else1618 Assert(!pvDst);1619 return;1620 }1621 pVCpu->iem.s.pbInstrBuf = NULL;1622 1623 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);1624 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;1625 }1626 # else1627 # error "refactor as needed"1628 /*1629 * If there is no special read handling, so we can read a bit more and1630 * put it in the prefetch buffer.1631 */1632 if ( cbDst < cbMaxRead1633 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)1634 {1635 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,1636 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);1637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1638 { /* likely */ }1639 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1640 {1641 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1642 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1643 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1644 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));1645 }1646 else1647 {1648 Log((RT_SUCCESS(rcStrict)1649 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1650 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1651 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1652 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1653 }1654 }1655 # endif1656 /*1657 * Special read handling, so only read exactly what's needed.1658 * This is a highly unlikely scenario.1659 */1660 else1661 {1662 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;1663 1664 /* Check instruction length. 
*/1665 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1666 if (RT_LIKELY(cbInstr + cbDst <= 15))1667 { /* likely */ }1668 else1669 {1670 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",1671 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1672 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1673 }1674 1675 /* Do the reading. */1676 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);1677 if (cbToRead > 0)1678 {1679 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),1680 pvDst, cbToRead, PGMACCESSORIGIN_IEM);1681 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1682 { /* likely */ }1683 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1684 {1685 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1686 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1687 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1688 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));1689 }1690 else1691 {1692 Log((RT_SUCCESS(rcStrict)1693 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1694 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1695 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1696 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1697 }1698 }1699 1700 /* Update the state and probably return. */1701 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1702 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;1703 # if 0 /* unused */1704 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1705 # endif1706 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1707 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;1708 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;1709 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? 
*/1710 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1711 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1712 pVCpu->iem.s.pbInstrBuf = NULL;1713 if (cbToRead == cbDst)1714 return;1715 Assert(cbToRead == cbMaxRead);1716 }1717 1718 /*1719 * More to read, loop.1720 */1721 cbDst -= cbMaxRead;1722 pvDst = (uint8_t *)pvDst + cbMaxRead;1723 }1724 # else /* !IN_RING3 */1725 RT_NOREF(pvDst, cbDst);1726 if (pvDst || cbDst)1727 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);1728 # endif /* !IN_RING3 */1729 }1730 1731 #else /* !IEM_WITH_CODE_TLB */1732 1733 /**1734 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate1735 * exception if it fails.1736 *1737 * @returns Strict VBox status code.1738 * @param pVCpu The cross context virtual CPU structure of the1739 * calling thread.1740 * @param cbMin The minimum number of bytes relative offOpcode1741 * that must be read.1742 */1743 VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT1744 {1745 /*1746 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.1747 *1748 * First translate CS:rIP to a physical address.1749 */1750 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;1751 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;1752 uint8_t const cbLeft = cbOpcode - offOpcode;1753 Assert(cbLeft < cbMin);1754 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));1755 1756 uint32_t cbToTryRead;1757 RTGCPTR GCPtrNext;1758 if (IEM_IS_64BIT_CODE(pVCpu))1759 {1760 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;1761 if (!IEM_IS_CANONICAL(GCPtrNext))1762 return iemRaiseGeneralProtectionFault0(pVCpu);1763 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1764 }1765 else1766 {1767 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;1768 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1769 GCPtrNext32 += cbOpcode;1770 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)1771 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */1772 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1773 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;1774 if (!cbToTryRead) /* overflowed */1775 {1776 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1777 cbToTryRead = UINT32_MAX;1778 /** @todo check out wrapping around the code segment. */1779 }1780 if (cbToTryRead < cbMin - cbLeft)1781 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1782 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;1783 1784 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1785 if (cbToTryRead > cbLeftOnPage)1786 cbToTryRead = cbLeftOnPage;1787 }1788 1789 /* Restrict to opcode buffer space.1790 1791 We're making ASSUMPTIONS here based on work done previously in1792 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will1793 be fetched in case of an instruction crossing two pages. 
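Reduced to its arithmetic, the clamping that follows keeps the read within the fixed opcode buffer and fails the fetch when even the clamped read cannot deliver the cbMin bytes the decoder asked for; a hypothetical helper capturing just that:

    #include <cstddef>

    // Clamp a read to the remaining opcode-buffer space; returns false when the
    // clamped read can no longer satisfy the decoder's minimum (the #GP(0) path).
    static bool clampOpcodeRead(size_t cbBufTotal, size_t cbAlreadyInBuf, size_t cbLeftForDecoder,
                                size_t cbMin, size_t &cbToTryRead)
    {
        size_t const cbSpace = cbBufTotal - cbAlreadyInBuf;
        if (cbToTryRead > cbSpace)
            cbToTryRead = cbSpace;
        return cbToTryRead + cbLeftForDecoder >= cbMin;
    }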
*/1794 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)1795 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;1796 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))1797 { /* likely */ }1798 else1799 {1800 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1801 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));1802 return iemRaiseGeneralProtectionFault0(pVCpu);1803 }1804 1805 PGMPTWALKFAST WalkFast;1806 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,1807 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,1808 &WalkFast);1809 if (RT_SUCCESS(rc))1810 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);1811 else1812 {1813 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));1814 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT1815 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)1816 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);1817 #endif1818 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);1819 }1820 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);1821 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1822 1823 RTGCPHYS const GCPhys = WalkFast.GCPhys;1824 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));1825 1826 /*1827 * Read the bytes at this address.1828 *1829 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,1830 * and since PATM should only patch the start of an instruction there1831 * should be no need to check again here.1832 */1833 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))1834 {1835 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],1836 cbToTryRead, PGMACCESSORIGIN_IEM);1837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1838 { /* likely */ }1839 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1840 {1841 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1842 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1843 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1844 }1845 else1846 {1847 Log((RT_SUCCESS(rcStrict)1848 ? 
"iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1849 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1850 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1851 return rcStrict;1852 }1853 }1854 else1855 {1856 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);1857 if (RT_SUCCESS(rc))1858 { /* likely */ }1859 else1860 {1861 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));1862 return rc;1863 }1864 }1865 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;1866 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));1867 1868 return VINF_SUCCESS;1869 }1870 1871 #endif /* !IEM_WITH_CODE_TLB */1872 #ifndef IEM_WITH_SETJMP1873 1874 /**1875 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.1876 *1877 * @returns Strict VBox status code.1878 * @param pVCpu The cross context virtual CPU structure of the1879 * calling thread.1880 * @param pb Where to return the opcode byte.1881 */1882 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT1883 {1884 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1885 if (rcStrict == VINF_SUCCESS)1886 {1887 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1888 *pb = pVCpu->iem.s.abOpcode[offOpcode];1889 pVCpu->iem.s.offOpcode = offOpcode + 1;1890 }1891 else1892 *pb = 0;1893 return rcStrict;1894 }1895 1896 #else /* IEM_WITH_SETJMP */1897 1898 /**1899 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.1900 *1901 * @returns The opcode byte.1902 * @param pVCpu The cross context virtual CPU structure of the calling thread.1903 */1904 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP1905 {1906 # ifdef IEM_WITH_CODE_TLB1907 uint8_t u8;1908 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);1909 return u8;1910 # else1911 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1912 if (rcStrict == VINF_SUCCESS)1913 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];1914 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1915 # endif1916 }1917 1918 #endif /* IEM_WITH_SETJMP */1919 1920 #ifndef IEM_WITH_SETJMP1921 1922 /**1923 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.1924 *1925 * @returns Strict VBox status code.1926 * @param pVCpu The cross context virtual CPU structure of the calling thread.1927 * @param pu16 Where to return the opcode dword.1928 */1929 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1930 {1931 uint8_t u8;1932 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1933 if (rcStrict == VINF_SUCCESS)1934 *pu16 = (int8_t)u8;1935 return rcStrict;1936 }1937 1938 1939 /**1940 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.1941 *1942 * @returns Strict VBox status code.1943 * @param pVCpu The cross context virtual CPU structure of the calling thread.1944 * @param pu32 Where to return the opcode dword.1945 */1946 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT1947 {1948 uint8_t u8;1949 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1950 if (rcStrict == VINF_SUCCESS)1951 *pu32 = (int8_t)u8;1952 return rcStrict;1953 }1954 1955 1956 /**1957 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.1958 *1959 * @returns Strict VBox status code.1960 * @param pVCpu The cross context virtual CPU structure of the calling 
thread.1961 * @param pu64 Where to return the opcode qword.1962 */1963 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1964 {1965 uint8_t u8;1966 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1967 if (rcStrict == VINF_SUCCESS)1968 *pu64 = (int8_t)u8;1969 return rcStrict;1970 }1971 1972 #endif /* !IEM_WITH_SETJMP */1973 1974 1975 #ifndef IEM_WITH_SETJMP1976 1977 /**1978 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.1979 *1980 * @returns Strict VBox status code.1981 * @param pVCpu The cross context virtual CPU structure of the calling thread.1982 * @param pu16 Where to return the opcode word.1983 */1984 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1985 {1986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);1987 if (rcStrict == VINF_SUCCESS)1988 {1989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1990 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS1991 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];1992 # else1993 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);1994 # endif1995 pVCpu->iem.s.offOpcode = offOpcode + 2;1996 }1997 else1998 *pu16 = 0;1999 return rcStrict;2000 }2001 2002 #else /* IEM_WITH_SETJMP */2003 2004 /**2005 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error2006 *2007 * @returns The opcode word.2008 * @param pVCpu The cross context virtual CPU structure of the calling thread.2009 */2010 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2011 {2012 # ifdef IEM_WITH_CODE_TLB2013 uint16_t u16;2014 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);2015 return u16;2016 # else2017 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2018 if (rcStrict == VINF_SUCCESS)2019 {2020 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2021 pVCpu->iem.s.offOpcode += 2;2022 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2023 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2024 # else2025 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2026 # endif2027 }2028 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2029 # endif2030 }2031 2032 #endif /* IEM_WITH_SETJMP */2033 2034 #ifndef IEM_WITH_SETJMP2035 2036 /**2037 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.2038 *2039 * @returns Strict VBox status code.2040 * @param pVCpu The cross context virtual CPU structure of the calling thread.2041 * @param pu32 Where to return the opcode double word.2042 */2043 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2044 {2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2046 if (rcStrict == VINF_SUCCESS)2047 {2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2049 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2050 pVCpu->iem.s.offOpcode = offOpcode + 2;2051 }2052 else2053 *pu32 = 0;2054 return rcStrict;2055 }2056 2057 2058 /**2059 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.2060 *2061 * @returns Strict VBox status code.2062 * @param pVCpu The cross context virtual CPU structure of the calling thread.2063 * @param pu64 Where to return the opcode quad word.2064 */2065 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2066 {2067 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2068 if (rcStrict == VINF_SUCCESS)2069 {2070 uint8_t offOpcode = 
pVCpu->iem.s.offOpcode;2071 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2072 pVCpu->iem.s.offOpcode = offOpcode + 2;2073 }2074 else2075 *pu64 = 0;2076 return rcStrict;2077 }2078 2079 #endif /* !IEM_WITH_SETJMP */2080 2081 #ifndef IEM_WITH_SETJMP2082 2083 /**2084 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.2085 *2086 * @returns Strict VBox status code.2087 * @param pVCpu The cross context virtual CPU structure of the calling thread.2088 * @param pu32 Where to return the opcode dword.2089 */2090 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2091 {2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2093 if (rcStrict == VINF_SUCCESS)2094 {2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2096 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2097 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2098 # else2099 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2100 pVCpu->iem.s.abOpcode[offOpcode + 1],2101 pVCpu->iem.s.abOpcode[offOpcode + 2],2102 pVCpu->iem.s.abOpcode[offOpcode + 3]);2103 # endif2104 pVCpu->iem.s.offOpcode = offOpcode + 4;2105 }2106 else2107 *pu32 = 0;2108 return rcStrict;2109 }2110 2111 #else /* IEM_WITH_SETJMP */2112 2113 /**2114 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.2115 *2116 * @returns The opcode dword.2117 * @param pVCpu The cross context virtual CPU structure of the calling thread.2118 */2119 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2120 {2121 # ifdef IEM_WITH_CODE_TLB2122 uint32_t u32;2123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);2124 return u32;2125 # else2126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2127 if (rcStrict == VINF_SUCCESS)2128 {2129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2130 pVCpu->iem.s.offOpcode = offOpcode + 4;2131 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2132 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2133 # else2134 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2135 pVCpu->iem.s.abOpcode[offOpcode + 1],2136 pVCpu->iem.s.abOpcode[offOpcode + 2],2137 pVCpu->iem.s.abOpcode[offOpcode + 3]);2138 # endif2139 }2140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2141 # endif2142 }2143 2144 #endif /* IEM_WITH_SETJMP */2145 2146 #ifndef IEM_WITH_SETJMP2147 2148 /**2149 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.2150 *2151 * @returns Strict VBox status code.2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.2153 * @param pu64 Where to return the opcode dword.2154 */2155 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2156 {2157 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2158 if (rcStrict == VINF_SUCCESS)2159 {2160 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2161 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2162 pVCpu->iem.s.abOpcode[offOpcode + 1],2163 pVCpu->iem.s.abOpcode[offOpcode + 2],2164 pVCpu->iem.s.abOpcode[offOpcode + 3]);2165 pVCpu->iem.s.offOpcode = offOpcode + 4;2166 }2167 else2168 *pu64 = 0;2169 return rcStrict;2170 }2171 2172 2173 /**2174 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.2175 *2176 * @returns Strict VBox status code.2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.2178 * @param pu64 Where to return the opcode qword.2179 */2180 VBOXSTRICTRC 
iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2181 {2182 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2183 if (rcStrict == VINF_SUCCESS)2184 {2185 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2186 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2187 pVCpu->iem.s.abOpcode[offOpcode + 1],2188 pVCpu->iem.s.abOpcode[offOpcode + 2],2189 pVCpu->iem.s.abOpcode[offOpcode + 3]);2190 pVCpu->iem.s.offOpcode = offOpcode + 4;2191 }2192 else2193 *pu64 = 0;2194 return rcStrict;2195 }2196 2197 #endif /* !IEM_WITH_SETJMP */2198 2199 #ifndef IEM_WITH_SETJMP2200 2201 /**2202 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.2203 *2204 * @returns Strict VBox status code.2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.2206 * @param pu64 Where to return the opcode qword.2207 */2208 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2209 {2210 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2211 if (rcStrict == VINF_SUCCESS)2212 {2213 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2214 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2215 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2216 # else2217 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2218 pVCpu->iem.s.abOpcode[offOpcode + 1],2219 pVCpu->iem.s.abOpcode[offOpcode + 2],2220 pVCpu->iem.s.abOpcode[offOpcode + 3],2221 pVCpu->iem.s.abOpcode[offOpcode + 4],2222 pVCpu->iem.s.abOpcode[offOpcode + 5],2223 pVCpu->iem.s.abOpcode[offOpcode + 6],2224 pVCpu->iem.s.abOpcode[offOpcode + 7]);2225 # endif2226 pVCpu->iem.s.offOpcode = offOpcode + 8;2227 }2228 else2229 *pu64 = 0;2230 return rcStrict;2231 }2232 2233 #else /* IEM_WITH_SETJMP */2234 2235 /**2236 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.2237 *2238 * @returns The opcode qword.2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.2240 */2241 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2242 {2243 # ifdef IEM_WITH_CODE_TLB2244 uint64_t u64;2245 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);2246 return u64;2247 # else2248 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2249 if (rcStrict == VINF_SUCCESS)2250 {2251 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2252 pVCpu->iem.s.offOpcode = offOpcode + 8;2253 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2254 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2255 # else2256 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2257 pVCpu->iem.s.abOpcode[offOpcode + 1],2258 pVCpu->iem.s.abOpcode[offOpcode + 2],2259 pVCpu->iem.s.abOpcode[offOpcode + 3],2260 pVCpu->iem.s.abOpcode[offOpcode + 4],2261 pVCpu->iem.s.abOpcode[offOpcode + 5],2262 pVCpu->iem.s.abOpcode[offOpcode + 6],2263 pVCpu->iem.s.abOpcode[offOpcode + 7]);2264 # endif2265 }2266 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2267 # endif2268 }2269 2270 #endif /* IEM_WITH_SETJMP */2271 2272 2273 2274 /** @name Misc Worker Functions.2275 * @{2276 */2277 2278 /**2279 * Gets the exception class for the specified exception vector.2280 *2281 * @returns The class of the specified exception.2282 * @param uVector The exception vector.2283 */2284 static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT2285 {2286 Assert(uVector <= X86_XCPT_LAST);2287 switch (uVector)2288 {2289 case X86_XCPT_DE:2290 case X86_XCPT_TS:2291 case X86_XCPT_NP:2292 case X86_XCPT_SS:2293 case X86_XCPT_GP:2294 case X86_XCPT_SX: /* AMD only 
*/2295 return IEMXCPTCLASS_CONTRIBUTORY;2296 2297 case X86_XCPT_PF:2298 case X86_XCPT_VE: /* Intel only */2299 return IEMXCPTCLASS_PAGE_FAULT;2300 2301 case X86_XCPT_DF:2302 return IEMXCPTCLASS_DOUBLE_FAULT;2303 }2304 return IEMXCPTCLASS_BENIGN;2305 }2306 2307 2308 /**2309 * Evaluates how to handle an exception caused during delivery of another event2310 * (exception / interrupt).2311 *2312 * @returns How to handle the recursive exception.2313 * @param pVCpu The cross context virtual CPU structure of the2314 * calling thread.2315 * @param fPrevFlags The flags of the previous event.2316 * @param uPrevVector The vector of the previous event.2317 * @param fCurFlags The flags of the current exception.2318 * @param uCurVector The vector of the current exception.2319 * @param pfXcptRaiseInfo Where to store additional information about the2320 * exception condition. Optional.2321 */2322 VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,2323 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)2324 {2325 /*2326 * Only CPU exceptions can be raised while delivering other events, software interrupt2327 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.2328 */2329 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);2330 Assert(pVCpu); RT_NOREF(pVCpu);2331 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));2332 2333 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;2334 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;2335 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)2336 {2337 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);2338 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)2339 {2340 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);2341 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT2342 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT2343 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))2344 {2345 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;2346 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF2347 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;2348 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. 
uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,2349 uCurVector, pVCpu->cpum.GstCtx.cr2));2350 }2351 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY2352 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)2353 {2354 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;2355 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));2356 }2357 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT2358 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY2359 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))2360 {2361 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;2362 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));2363 }2364 }2365 else2366 {2367 if (uPrevVector == X86_XCPT_NMI)2368 {2369 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;2370 if (uCurVector == X86_XCPT_PF)2371 {2372 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;2373 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));2374 }2375 }2376 else if ( uPrevVector == X86_XCPT_AC2377 && uCurVector == X86_XCPT_AC)2378 {2379 enmRaise = IEMXCPTRAISE_CPU_HANG;2380 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;2381 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));2382 }2383 }2384 }2385 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)2386 {2387 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;2388 if (uCurVector == X86_XCPT_PF)2389 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;2390 }2391 else2392 {2393 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);2394 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;2395 }2396 2397 if (pfXcptRaiseInfo)2398 *pfXcptRaiseInfo = fRaiseInfo;2399 return enmRaise;2400 }2401 2402 2403 /**2404 * Enters the CPU shutdown state initiated by a triple fault or other2405 * unrecoverable conditions.2406 *2407 * @returns Strict VBox status code.2408 * @param pVCpu The cross context virtual CPU structure of the2409 * calling thread.2410 */2411 static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT2412 {2413 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))2414 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);2415 2416 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))2417 {2418 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));2419 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);2420 }2421 2422 RT_NOREF(pVCpu);2423 return VINF_EM_TRIPLE_FAULT;2424 }2425 2426 2427 /**2428 * Validates a new SS segment.2429 *2430 * @returns VBox strict status code.2431 * @param pVCpu The cross context virtual CPU structure of the2432 * calling thread.2433 * @param NewSS The new SS selctor.2434 * @param uCpl The CPL to load the stack for.2435 * @param pDesc Where to return the descriptor.2436 */2437 static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT2438 {2439 /* Null selectors are not allowed (we're not called for dispatching2440 interrupts with SS=0 in long mode). */2441 if (!(NewSS & X86_SEL_MASK_OFF_RPL))2442 {2443 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));2444 return iemRaiseTaskSwitchFault0(pVCpu);2445 }2446 2447 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. 
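The class-merging logic in iemGetXcptClass / IEMEvaluateRecursiveXcpt above reduces to a small decision table. Here is a standalone sketch of just that table (hypothetical enum and function names; the IEM version additionally classes #VE as page-fault class and #SX as contributory, and records the extra IEMXCPTRAISEINFO detail):

#include <cstdint>

enum XcptClass { kBenign, kContributory, kPageFault, kDoubleFault };

static XcptClass classify(uint8_t uVector)
{
    switch (uVector)
    {
        case 0x00: case 0x0a: case 0x0b:
        case 0x0c: case 0x0d:             return kContributory;  /* #DE #TS #NP #SS #GP */
        case 0x0e:                        return kPageFault;     /* #PF                 */
        case 0x08:                        return kDoubleFault;   /* #DF                 */
        default:                          return kBenign;
    }
}

enum Outcome { kDeliverCurrent, kRaiseDoubleFault, kTripleFault };

static Outcome merge(uint8_t uPrevVector, uint8_t uCurVector)
{
    XcptClass const enmPrev = classify(uPrevVector);
    XcptClass const enmCur  = classify(uCurVector);
    if (enmPrev == kPageFault && (enmCur == kPageFault || enmCur == kContributory))
        return kRaiseDoubleFault;                  /* #PF followed by #PF or a contributory fault */
    if (enmPrev == kContributory && enmCur == kContributory)
        return kRaiseDoubleFault;
    if (enmPrev == kDoubleFault && (enmCur == kContributory || enmCur == kPageFault))
        return kTripleFault;                       /* the #DF handler itself faulted -> shutdown   */
    return kDeliverCurrent;                        /* benign combinations are delivered normally   */
}

For instance, merge(0x0e /*#PF*/, 0x0d /*#GP*/) lands in the double-fault row, which is the IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT case logged above.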
*/2448 if ((NewSS & X86_SEL_RPL) != uCpl)2449 {2450 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));2451 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2452 }2453 2454 /*2455 * Read the descriptor.2456 */2457 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);2458 if (rcStrict != VINF_SUCCESS)2459 return rcStrict;2460 2461 /*2462 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.2463 */2464 if (!pDesc->Legacy.Gen.u1DescType)2465 {2466 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));2467 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2468 }2469 2470 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)2471 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )2472 {2473 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));2474 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2475 }2476 if (pDesc->Legacy.Gen.u2Dpl != uCpl)2477 {2478 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));2479 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);2480 }2481 2482 /* Is it there? */2483 /** @todo testcase: Is this checked before the canonical / limit check below? */2484 if (!pDesc->Legacy.Gen.u1Present)2485 {2486 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));2487 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);2488 }2489 2490 return VINF_SUCCESS;2491 }2492 2493 /** @} */2494 2495 2496 /** @name Raising Exceptions.2497 *2498 * @{2499 */2500 2501 2502 /**2503 * Loads the specified stack far pointer from the TSS.2504 *2505 * @returns VBox strict status code.2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.2507 * @param uCpl The CPL to load the stack for.2508 * @param pSelSS Where to return the new stack segment.2509 * @param puEsp Where to return the new stack pointer.2510 */2511 static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT2512 {2513 VBOXSTRICTRC rcStrict;2514 Assert(uCpl < 4);2515 2516 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);2517 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)2518 {2519 /*2520 * 16-bit TSS (X86TSS16).2521 */2522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();2523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:2524 {2525 uint32_t off = uCpl * 4 + 2;2526 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)2527 {2528 /** @todo check actual access pattern here. */2529 uint32_t u32Tmp = 0; /* gcc maybe... */2530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2531 if (rcStrict == VINF_SUCCESS)2532 {2533 *puEsp = RT_LOWORD(u32Tmp);2534 *pSelSS = RT_HIWORD(u32Tmp);2535 return VINF_SUCCESS;2536 }2537 }2538 else2539 {2540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));2541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2542 }2543 break;2544 }2545 2546 /*2547 * 32-bit TSS (X86TSS32).2548 */2549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();2550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:2551 {2552 uint32_t off = uCpl * 8 + 4;2553 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)2554 {2555 /** @todo check actual access pattern here. 
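iemMiscValidateNewSS above applies its checks in a fixed order, and the order matters because it determines whether #TS or #NP is raised. A compact standalone restatement (made-up struct and names, descriptor bits reduced to booleans):

#include <cstdint>

struct DescBits
{
    bool    fSystem;      /* descriptor type bit clear => system segment */
    bool    fCode;        /* type bit 3                                  */
    bool    fWritable;    /* type bit 1 for data segments                */
    bool    fPresent;
    uint8_t uDpl;
};

enum SsCheck { kSsOk, kSsRaiseTs, kSsRaiseNp };

static SsCheck validateNewSS(uint16_t uSel, uint8_t uCpl, const DescBits &Desc)
{
    if (!(uSel & ~3u))                   return kSsRaiseTs;   /* null selector                    */
    if ((uSel & 3u) != uCpl)             return kSsRaiseTs;   /* RPL must equal CPL               */
    if (Desc.fSystem)                    return kSsRaiseTs;   /* must be a code/data descriptor   */
    if (Desc.fCode || !Desc.fWritable)   return kSsRaiseTs;   /* must be writable data            */
    if (Desc.uDpl != uCpl)               return kSsRaiseTs;   /* DPL must equal CPL               */
    if (!Desc.fPresent)                  return kSsRaiseNp;   /* presence is checked last -> #NP  */
    return kSsOk;
}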
*/2556 uint64_t u64Tmp;2557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2558 if (rcStrict == VINF_SUCCESS)2559 {2560 *puEsp = u64Tmp & UINT32_MAX;2561 *pSelSS = (RTSEL)(u64Tmp >> 32);2562 return VINF_SUCCESS;2563 }2564 }2565 else2566 {2567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));2568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2569 }2570 break;2571 }2572 2573 default:2574 AssertFailed();2575 rcStrict = VERR_IEM_IPE_4;2576 break;2577 }2578 2579 *puEsp = 0; /* make gcc happy */2580 *pSelSS = 0; /* make gcc happy */2581 return rcStrict;2582 }2583 2584 2585 /**2586 * Loads the specified stack pointer from the 64-bit TSS.2587 *2588 * @returns VBox strict status code.2589 * @param pVCpu The cross context virtual CPU structure of the calling thread.2590 * @param uCpl The CPL to load the stack for.2591 * @param uIst The interrupt stack table index, 0 if to use uCpl.2592 * @param puRsp Where to return the new stack pointer.2593 */2594 static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT2595 {2596 Assert(uCpl < 4);2597 Assert(uIst < 8);2598 *puRsp = 0; /* make gcc happy */2599 2600 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);2601 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);2602 2603 uint32_t off;2604 if (uIst)2605 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);2606 else2607 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);2608 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)2609 {2610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));2611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);2612 }2613 2614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);2615 }2616 2617 2618 /**2619 * Adjust the CPU state according to the exception being raised.2620 *2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.2622 * @param u8Vector The exception that has been raised.2623 */2624 DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)2625 {2626 switch (u8Vector)2627 {2628 case X86_XCPT_DB:2629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);2630 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;2631 break;2632 /** @todo Read the AMD and Intel exception reference... 
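The three stack-fetch helpers above differ only in where the ring (or IST) stack slots sit inside the TSS. A standalone sketch of the offset arithmetic, assuming the standard packed 16-bit, 32-bit and 64-bit TSS layouts (function names are illustrative):

#include <cstdint>

static uint32_t tss16StackSlotOffset(uint8_t uCpl)      /* SP0/SS0 start at offset 2          */
{
    return uCpl * 4u + 2u;                              /* {sp, ss} pairs, 2 bytes each field */
}

static uint32_t tss32StackSlotOffset(uint8_t uCpl)      /* ESP0/SS0 start at offset 4         */
{
    return uCpl * 8u + 4u;                              /* {esp, ss + pad} pairs, 4 bytes each */
}

static uint32_t tss64StackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    /* 64-bit TSS: rsp0..rsp2 at offset 4, ist1..ist7 at offset 36, all 8 bytes wide. */
    if (uIst)
        return 36u + (uint32_t)(uIst - 1) * 8u;
    return 4u + uCpl * 8u;
}

Reading 4 bytes at the 16-bit offset yields SP in the low word and SS in the high word; reading 8 bytes at the 32-bit offset yields ESP in the low dword and SS in the high one, which is exactly how the two cases above split u32Tmp and u64Tmp.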
*/2633 }2634 }2635 2636 2637 /**2638 * Implements exceptions and interrupts for real mode.2639 *2640 * @returns VBox strict status code.2641 * @param pVCpu The cross context virtual CPU structure of the calling thread.2642 * @param cbInstr The number of bytes to offset rIP by in the return2643 * address.2644 * @param u8Vector The interrupt / exception vector number.2645 * @param fFlags The flags.2646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2648 */2649 static VBOXSTRICTRC2650 iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,2651 uint8_t cbInstr,2652 uint8_t u8Vector,2653 uint32_t fFlags,2654 uint16_t uErr,2655 uint64_t uCr2) RT_NOEXCEPT2656 {2657 NOREF(uErr); NOREF(uCr2);2658 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2659 2660 /*2661 * Read the IDT entry.2662 */2663 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)2664 {2665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));2666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));2667 }2668 RTFAR16 Idte;2669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);2670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2671 {2672 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));2673 return rcStrict;2674 }2675 2676 #ifdef LOG_ENABLED2677 /* If software interrupt, try decode it if logging is enabled and such. */2678 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2679 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))2680 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);2681 #endif2682 2683 /*2684 * Push the stack frame.2685 */2686 uint8_t bUnmapInfo;2687 uint16_t *pu16Frame;2688 uint64_t uNewRsp;2689 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);2690 if (rcStrict != VINF_SUCCESS)2691 return rcStrict;2692 2693 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);2694 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC2695 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);2696 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)2697 fEfl |= UINT16_C(0xf000);2698 #endif2699 pu16Frame[2] = (uint16_t)fEfl;2700 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;2701 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;2702 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);2703 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))2704 return rcStrict;2705 2706 /*2707 * Load the vector address into cs:ip and make exception specific state2708 * adjustments.2709 */2710 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;2711 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;2712 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;2713 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;2714 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */2715 pVCpu->cpum.GstCtx.rip = Idte.off;2716 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);2717 IEMMISC_SET_EFL(pVCpu, fEfl);2718 2719 /** @todo do we actually do this in real mode? 
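For the real-mode path above, the whole dispatch is a 4-byte IVT lookup, a three-word stack frame and a far jump. A simplified standalone model (flat byte-array memory, made-up names, no limit or wrap checks; the real code additionally handles the pre-286 FLAGS quirk and pending debug events):

#include <cstdint>
#include <cstring>

struct RealModeCpu
{
    uint16_t ip, cs, flags, sp, ss;
    uint8_t *pbMem;                    /* flat guest memory for the sketch */
};

static void dispatchRealModeInt(RealModeCpu &Cpu, uint8_t bVector, uint16_t cbInstr, bool fSoftInt)
{
    /* IVT entry at linear 0000:vector*4: offset word, then segment word. */
    uint16_t uNewIp, uNewCs;
    std::memcpy(&uNewIp, &Cpu.pbMem[bVector * 4u + 0], sizeof(uNewIp));
    std::memcpy(&uNewCs, &Cpu.pbMem[bVector * 4u + 2], sizeof(uNewCs));

    /* Push FLAGS, CS and the return IP (past the INT xx for software interrupts). */
    uint16_t const uRetIp = fSoftInt ? (uint16_t)(Cpu.ip + cbInstr) : Cpu.ip;
    uint16_t const au16Frame[3] = { uRetIp, Cpu.cs, Cpu.flags };   /* IRET pops IP, CS, FLAGS */
    Cpu.sp = (uint16_t)(Cpu.sp - 6);
    std::memcpy(&Cpu.pbMem[((uint32_t)Cpu.ss << 4) + Cpu.sp], au16Frame, sizeof(au16Frame));

    /* Jump to the handler and mask further interrupts / single-stepping. */
    Cpu.cs     = uNewCs;
    Cpu.ip     = uNewIp;
    Cpu.flags &= (uint16_t)~((1u << 9) | (1u << 8));   /* clear IF and TF; the code above also clears AC */
}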
*/2720 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)2721 iemRaiseXcptAdjustState(pVCpu, u8Vector);2722 2723 /*2724 * Deal with debug events that follows the exception and clear inhibit flags.2725 */2726 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)2727 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))2728 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2729 else2730 {2731 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",2732 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));2733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);2734 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)2735 >> CPUMCTX_DBG_HIT_DRX_SHIFT;2736 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);2737 return iemRaiseDebugException(pVCpu);2738 }2739 2740 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,2741 so best leave them alone in case we're in a weird kind of real mode... */2742 2743 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;2744 }2745 2746 2747 /**2748 * Loads a NULL data selector into when coming from V8086 mode.2749 *2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.2751 * @param pSReg Pointer to the segment register.2752 */2753 DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)2754 {2755 pSReg->Sel = 0;2756 pSReg->ValidSel = 0;2757 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2758 {2759 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */2760 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;2761 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;2762 }2763 else2764 {2765 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2766 /** @todo check this on AMD-V */2767 pSReg->u64Base = 0;2768 pSReg->u32Limit = 0;2769 }2770 }2771 2772 2773 /**2774 * Loads a segment selector during a task switch in V8086 mode.2775 *2776 * @param pSReg Pointer to the segment register.2777 * @param uSel The selector value to load.2778 */2779 DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)2780 {2781 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */2782 pSReg->Sel = uSel;2783 pSReg->ValidSel = uSel;2784 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2785 pSReg->u64Base = uSel << 4;2786 pSReg->u32Limit = 0xffff;2787 pSReg->Attr.u = 0xf3;2788 }2789 2790 2791 /**2792 * Loads a segment selector during a task switch in protected mode.2793 *2794 * In this task switch scenario, we would throw \#TS exceptions rather than2795 * \#GPs.2796 *2797 * @returns VBox strict status code.2798 * @param pVCpu The cross context virtual CPU structure of the calling thread.2799 * @param pSReg Pointer to the segment register.2800 * @param uSel The new selector value.2801 *2802 * @remarks This does _not_ handle CS or SS.2803 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.2804 */2805 static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT2806 {2807 Assert(!IEM_IS_64BIT_CODE(pVCpu));2808 2809 /* Null data selector. 
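iemHlpLoadSelectorInV86Mode above derives every hidden field directly from the 16-bit selector value. A minimal standalone restatement (illustrative struct; the attribute byte is kept as the same raw 0xf3 the code uses):

#include <cstdint>

struct SegRegSketch
{
    uint16_t uSel;
    uint64_t uBase;
    uint32_t uLimit;
    uint32_t uAttr;
};

static SegRegSketch loadSelectorInV86Mode(uint16_t uSel)
{
    SegRegSketch Reg;
    Reg.uSel   = uSel;
    Reg.uBase  = (uint64_t)uSel << 4;   /* real-mode style base              */
    Reg.uLimit = 0xffff;                /* 64 KiB limit                      */
    Reg.uAttr  = 0xf3;                  /* present, DPL 3, accessed r/w data */
    return Reg;
}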
*/2810 if (!(uSel & X86_SEL_MASK_OFF_RPL))2811 {2812 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);2813 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2814 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2815 return VINF_SUCCESS;2816 }2817 2818 /* Fetch the descriptor. */2819 IEMSELDESC Desc;2820 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);2821 if (rcStrict != VINF_SUCCESS)2822 {2823 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,2824 VBOXSTRICTRC_VAL(rcStrict)));2825 return rcStrict;2826 }2827 2828 /* Must be a data segment or readable code segment. */2829 if ( !Desc.Legacy.Gen.u1DescType2830 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)2831 {2832 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,2833 Desc.Legacy.Gen.u4Type));2834 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2835 }2836 2837 /* Check privileges for data segments and non-conforming code segments. */2838 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2839 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))2840 {2841 /* The RPL and the new CPL must be less than or equal to the DPL. */2842 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl2843 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))2844 {2845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",2846 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));2847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2848 }2849 }2850 2851 /* Is it there? */2852 if (!Desc.Legacy.Gen.u1Present)2853 {2854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));2855 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);2856 }2857 2858 /* The base and limit. */2859 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);2860 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);2861 2862 /*2863 * Ok, everything checked out fine. Now set the accessed bit before2864 * committing the result into the registers.2865 */2866 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))2867 {2868 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);2869 if (rcStrict != VINF_SUCCESS)2870 return rcStrict;2871 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;2872 }2873 2874 /* Commit */2875 pSReg->Sel = uSel;2876 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);2877 pSReg->u32Limit = cbLimit;2878 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */2879 pSReg->ValidSel = uSel;2880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;2881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))2882 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;2883 2884 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));2885 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);2886 return VINF_SUCCESS;2887 }2888 2889 2890 /**2891 * Performs a task switch.2892 *2893 * If the task switch is the result of a JMP, CALL or IRET instruction, the2894 * caller is responsible for performing the necessary checks (like DPL, TSS2895 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction2896 * reference for JMP, CALL, IRET.2897 *2898 * If the task switch is the due to a software interrupt or hardware exception,2899 * the caller is responsible for validating the TSS selector and descriptor. 
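The privilege rule applied by iemHlpTaskSwitchLoadDataSelectorInProtMode above is easy to misread: both the selector RPL and the new CPL must not exceed the descriptor DPL, conforming code segments are exempt, and a null selector is simply accepted. A standalone sketch of just that decision (hypothetical names, descriptor bits flattened into parameters):

#include <cstdint>

enum TsDataSelCheck { kTsSelOkNull, kTsSelOk, kTsSelRaiseTs, kTsSelRaiseNp };

static TsDataSelCheck checkTaskSwitchDataSel(uint16_t uSel, uint8_t uCpl,
                                             bool fSystem, bool fCode, bool fReadable,
                                             bool fConforming, bool fPresent, uint8_t uDpl)
{
    if (!(uSel & ~3u))
        return kTsSelOkNull;                               /* null selectors load as unusable     */
    if (fSystem || (fCode && !fReadable))
        return kTsSelRaiseTs;                              /* must be data or readable code       */
    if (!(fCode && fConforming))                           /* privilege check skipped for conforming code */
        if ((uSel & 3u) > uDpl || uCpl > uDpl)
            return kTsSelRaiseTs;
    if (!fPresent)
        return kTsSelRaiseNp;                              /* missing segment raises #NP, not #TS */
    return kTsSelOk;
}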
See2900 * Intel Instruction reference for INT n.2901 *2902 * @returns VBox strict status code.2903 * @param pVCpu The cross context virtual CPU structure of the calling thread.2904 * @param enmTaskSwitch The cause of the task switch.2905 * @param uNextEip The EIP effective after the task switch.2906 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.2907 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.2908 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.2909 * @param SelTss The TSS selector of the new task.2910 * @param pNewDescTss Pointer to the new TSS descriptor.2911 */2912 VBOXSTRICTRC2913 iemTaskSwitch(PVMCPUCC pVCpu,2914 IEMTASKSWITCH enmTaskSwitch,2915 uint32_t uNextEip,2916 uint32_t fFlags,2917 uint16_t uErr,2918 uint64_t uCr2,2919 RTSEL SelTss,2920 PIEMSELDESC pNewDescTss) RT_NOEXCEPT2921 {2922 Assert(!IEM_IS_REAL_MODE(pVCpu));2923 Assert(!IEM_IS_64BIT_CODE(pVCpu));2924 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);2925 2926 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;2927 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL2928 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY2929 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2930 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2931 2932 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL2933 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);2934 2935 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,2936 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));2937 2938 /* Update CR2 in case it's a page-fault. */2939 /** @todo This should probably be done much earlier in IEM/PGM. See2940 * @bugref{5653#c49}. */2941 if (fFlags & IEM_XCPT_FLAGS_CR2)2942 pVCpu->cpum.GstCtx.cr2 = uCr2;2943 2944 /*2945 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"2946 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".2947 */2948 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);2949 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;2950 if (uNewTssLimit < uNewTssLimitMin)2951 {2952 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",2953 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);2955 }2956 2957 /*2958 * Task switches in VMX non-root mode always cause task switches.2959 * The new TSS must have been read and validated (DPL, limits etc.) before a2960 * task-switch VM-exit commences.2961 *2962 * See Intel spec. 25.4.2 "Treatment of Task Switches".2963 */2964 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))2965 {2966 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));2967 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);2968 }2969 2970 /*2971 * The SVM nested-guest intercept for task-switch takes priority over all exceptions2972 * after validating the incoming (new) TSS, see AMD spec. 
15.14.1 "Task Switch Intercept".2973 */2974 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))2975 {2976 uint64_t const uExitInfo1 = SelTss;2977 uint64_t uExitInfo2 = uErr;2978 switch (enmTaskSwitch)2979 {2980 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;2981 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;2982 default: break;2983 }2984 if (fFlags & IEM_XCPT_FLAGS_ERR)2985 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;2986 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)2987 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;2988 2989 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));2990 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);2991 RT_NOREF2(uExitInfo1, uExitInfo2);2992 }2993 2994 /*2995 * Check the current TSS limit. The last written byte to the current TSS during the2996 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).2997 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.2998 *2999 * The AMD docs doesn't mention anything about limit checks with LTR which suggests you can3000 * end up with smaller than "legal" TSS limits.3001 */3002 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;3003 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;3004 if (uCurTssLimit < uCurTssLimitMin)3005 {3006 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",3007 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));3008 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);3009 }3010 3011 /*3012 * Verify that the new TSS can be accessed and map it. Map only the required contents3013 * and not the entire TSS.3014 */3015 uint8_t bUnmapInfoNewTss;3016 void *pvNewTss;3017 uint32_t const cbNewTss = uNewTssLimitMin + 1;3018 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);3019 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);3020 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may3021 * not perform correct translation if this happens. See Intel spec. 7.2.13022 * "Task-State Segment". */3023 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);3024 /** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.3025 * Consider wrapping the remainder into a function for simpler cleanup. */3026 if (rcStrict != VINF_SUCCESS)3027 {3028 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,3029 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));3030 return rcStrict;3031 }3032 3033 /*3034 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.3035 */3036 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;3037 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP3038 || enmTaskSwitch == IEMTASKSWITCH_IRET)3039 {3040 uint8_t bUnmapInfoDescCurTss;3041 PX86DESC pDescCurTss;3042 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,3043 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3044 if (rcStrict != VINF_SUCCESS)3045 {3046 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. 
enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3047 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3048 return rcStrict;3049 }3050 3051 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3052 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);3053 if (rcStrict != VINF_SUCCESS)3054 {3055 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3056 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3057 return rcStrict;3058 }3059 3060 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */3061 if (enmTaskSwitch == IEMTASKSWITCH_IRET)3062 {3063 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY3064 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);3065 fEFlags &= ~X86_EFL_NT;3066 }3067 }3068 3069 /*3070 * Save the CPU state into the current TSS.3071 */3072 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;3073 if (GCPtrNewTss == GCPtrCurTss)3074 {3075 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));3076 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",3077 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,3078 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,3079 pVCpu->cpum.GstCtx.ldtr.Sel));3080 }3081 if (fIsNewTss386)3082 {3083 /*3084 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.3085 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.3086 */3087 uint8_t bUnmapInfoCurTss32;3088 void *pvCurTss32;3089 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);3090 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);3091 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);3092 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,3093 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3094 if (rcStrict != VINF_SUCCESS)3095 {3096 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3097 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3098 return rcStrict;3099 }3100 3101 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3102 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);3103 pCurTss32->eip = uNextEip;3104 pCurTss32->eflags = fEFlags;3105 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;3106 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;3107 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;3108 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;3109 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;3110 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;3111 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;3112 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;3113 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;3114 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;3115 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;3116 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;3117 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;3118 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;3119 3120 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);3121 if (rcStrict != VINF_SUCCESS)3122 {3123 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. 
enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3124 VBOXSTRICTRC_VAL(rcStrict)));3125 return rcStrict;3126 }3127 }3128 else3129 {3130 /*3131 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.3132 */3133 uint8_t bUnmapInfoCurTss16;3134 void *pvCurTss16;3135 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);3136 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);3137 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);3138 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,3139 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);3140 if (rcStrict != VINF_SUCCESS)3141 {3142 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",3143 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));3144 return rcStrict;3145 }3146 3147 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */3148 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);3149 pCurTss16->ip = uNextEip;3150 pCurTss16->flags = (uint16_t)fEFlags;3151 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;3152 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;3153 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;3154 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;3155 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;3156 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;3157 pCurTss16->si = pVCpu->cpum.GstCtx.si;3158 pCurTss16->di = pVCpu->cpum.GstCtx.di;3159 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;3160 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;3161 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;3162 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;3163 3164 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);3165 if (rcStrict != VINF_SUCCESS)3166 {3167 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,3168 VBOXSTRICTRC_VAL(rcStrict)));3169 return rcStrict;3170 }3171 }3172 3173 /*3174 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.3175 */3176 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3177 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3178 {3179 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */3180 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;3181 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;3182 }3183 3184 /*3185 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,3186 * it's done further below with error handling (e.g. CR3 changes will go through PGM).3187 */3188 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;3189 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;3190 bool fNewDebugTrap;3191 if (fIsNewTss386)3192 {3193 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;3194 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? 
pNewTss32->cr3 : 0;3195 uNewEip = pNewTss32->eip;3196 uNewEflags = pNewTss32->eflags;3197 uNewEax = pNewTss32->eax;3198 uNewEcx = pNewTss32->ecx;3199 uNewEdx = pNewTss32->edx;3200 uNewEbx = pNewTss32->ebx;3201 uNewEsp = pNewTss32->esp;3202 uNewEbp = pNewTss32->ebp;3203 uNewEsi = pNewTss32->esi;3204 uNewEdi = pNewTss32->edi;3205 uNewES = pNewTss32->es;3206 uNewCS = pNewTss32->cs;3207 uNewSS = pNewTss32->ss;3208 uNewDS = pNewTss32->ds;3209 uNewFS = pNewTss32->fs;3210 uNewGS = pNewTss32->gs;3211 uNewLdt = pNewTss32->selLdt;3212 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);3213 }3214 else3215 {3216 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;3217 uNewCr3 = 0;3218 uNewEip = pNewTss16->ip;3219 uNewEflags = pNewTss16->flags;3220 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;3221 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;3222 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;3223 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;3224 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;3225 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;3226 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;3227 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;3228 uNewES = pNewTss16->es;3229 uNewCS = pNewTss16->cs;3230 uNewSS = pNewTss16->ss;3231 uNewDS = pNewTss16->ds;3232 uNewFS = 0;3233 uNewGS = 0;3234 uNewLdt = pNewTss16->selLdt;3235 fNewDebugTrap = false;3236 }3237 3238 if (GCPtrNewTss == GCPtrCurTss)3239 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",3240 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));3241 3242 /*3243 * We're done accessing the new TSS.3244 */3245 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3246 if (rcStrict != VINF_SUCCESS)3247 {3248 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));3249 return rcStrict;3250 }3251 3252 /*3253 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.3254 */3255 if (enmTaskSwitch != IEMTASKSWITCH_IRET)3256 {3257 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,3258 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);3259 if (rcStrict != VINF_SUCCESS)3260 {3261 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3262 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3263 return rcStrict;3264 }3265 3266 /* Check that the descriptor indicates the new TSS is available (not busy). */3267 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL3268 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,3269 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));3270 3271 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3272 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);3273 if (rcStrict != VINF_SUCCESS)3274 {3275 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",3276 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));3277 return rcStrict;3278 }3279 }3280 3281 /*3282 * From this point on, we're technically in the new task. 
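The busy-bit handling in the two GDT updates above follows one simple rule per task-switch source; bit 1 of the system-descriptor type field is what distinguishes the busy TSS types (3, 11) from the available ones (1, 9). A standalone sketch (illustrative names):

#include <cstdint>

enum TaskSwitchKind { kTsJump, kTsCall, kTsIntXcpt, kTsIret };

static const uint8_t kTssTypeBusyBit = 0x2;   /* bit 1 of the 4-bit system descriptor type */

static void updateTssBusyBits(TaskSwitchKind enmKind, uint8_t *puOldTssType, uint8_t *puNewTssType)
{
    if (enmKind == kTsJump || enmKind == kTsIret)
        *puOldTssType &= (uint8_t)~kTssTypeBusyBit;   /* outgoing task is no longer busy           */
    if (enmKind != kTsIret)
        *puNewTssType |= kTssTypeBusyBit;             /* incoming task becomes busy, except on IRET,
                                                         which returns to an already-busy task     */
}

The EFLAGS.NT bookkeeping follows the same split: it is cleared in the saved image for IRET switches and set in the loaded image for CALL/INT switches, as the code above does.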
We will defer exceptions3283 * until the completion of the task switch but before executing any instructions in the new task.3284 */3285 pVCpu->cpum.GstCtx.tr.Sel = SelTss;3286 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;3287 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;3288 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);3289 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);3290 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);3291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);3292 3293 /* Set the busy bit in TR. */3294 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;3295 3296 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */3297 if ( enmTaskSwitch == IEMTASKSWITCH_CALL3298 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)3299 {3300 uNewEflags |= X86_EFL_NT;3301 }3302 3303 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */3304 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;3305 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);3306 3307 pVCpu->cpum.GstCtx.eip = uNewEip;3308 pVCpu->cpum.GstCtx.eax = uNewEax;3309 pVCpu->cpum.GstCtx.ecx = uNewEcx;3310 pVCpu->cpum.GstCtx.edx = uNewEdx;3311 pVCpu->cpum.GstCtx.ebx = uNewEbx;3312 pVCpu->cpum.GstCtx.esp = uNewEsp;3313 pVCpu->cpum.GstCtx.ebp = uNewEbp;3314 pVCpu->cpum.GstCtx.esi = uNewEsi;3315 pVCpu->cpum.GstCtx.edi = uNewEdi;3316 3317 uNewEflags &= X86_EFL_LIVE_MASK;3318 uNewEflags |= X86_EFL_RA1_MASK;3319 IEMMISC_SET_EFL(pVCpu, uNewEflags);3320 3321 /*3322 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors3323 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR33324 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.3325 */3326 pVCpu->cpum.GstCtx.es.Sel = uNewES;3327 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;3328 3329 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3330 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;3331 3332 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3333 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;3334 3335 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;3336 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;3337 3338 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;3339 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;3340 3341 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;3342 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;3343 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);3344 3345 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;3346 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;3347 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;3348 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);3349 3350 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3351 {3352 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;3353 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;3354 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;3355 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;3356 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;3357 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;3358 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;3359 }3360 3361 /*3362 * Switch CR3 for the new task.3363 */3364 if ( fIsNewTss3863365 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))3366 {3367 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */3368 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);3369 AssertRCSuccessReturn(rc, rc);3370 3371 /* Inform PGM. 
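The EFLAGS commit above keeps only the writable bits, forces the reserved always-one bit, and ORs in NT for CALL/INT-initiated switches. A standalone sketch with the masks spelled out numerically (the literals are my own and are assumed to correspond to X86_EFL_LIVE_MASK and X86_EFL_RA1_MASK; names are illustrative):

#include <cstdint>

static uint32_t sanitizeNewTaskEflags(uint32_t uEflagsFromTss, bool fNestedTask)
{
    uint32_t const kLiveMask = 0x003f7fd5;    /* CF PF AF ZF SF TF IF DF OF IOPL NT RF VM AC VIF VIP ID */
    uint32_t const kRa1Mask  = 0x00000002;    /* bit 1 always reads as one                              */
    uint32_t uEfl = uEflagsFromTss & kLiveMask;
    uEfl |= kRa1Mask;
    if (fNestedTask)                          /* CALL / INT / exception initiated switch */
        uEfl |= 1u << 14;                     /* NT */
    return uEfl;
}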
*/3372 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */3373 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));3374 AssertRCReturn(rc, rc);3375 /* ignore informational status codes */3376 3377 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);3378 }3379 3380 /*3381 * Switch LDTR for the new task.3382 */3383 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))3384 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);3385 else3386 {3387 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */3388 3389 IEMSELDESC DescNewLdt;3390 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);3391 if (rcStrict != VINF_SUCCESS)3392 {3393 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,3394 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));3395 return rcStrict;3396 }3397 if ( !DescNewLdt.Legacy.Gen.u1Present3398 || DescNewLdt.Legacy.Gen.u1DescType3399 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)3400 {3401 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,3402 uNewLdt, DescNewLdt.Legacy.u));3403 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);3404 }3405 3406 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;3407 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;3408 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);3409 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);3410 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);3411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))3412 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;3413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));3414 }3415 3416 IEMSELDESC DescSS;3417 if (IEM_IS_V86_MODE(pVCpu))3418 {3419 IEM_SET_CPL(pVCpu, 3);3420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);3421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);3422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);3423 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);3424 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);3425 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);3426 3427 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */3428 DescSS.Legacy.u = 0;3429 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;3430 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;3431 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;3432 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);3433 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);3434 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;3435 DescSS.Legacy.Gen.u2Dpl = 3;3436 }3437 else3438 {3439 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);3440 3441 /*3442 * Load the stack segment for the new task.3443 */3444 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))3445 {3446 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));3447 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3448 }3449 3450 /* Fetch the descriptor. */3451 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);3452 if (rcStrict != VINF_SUCCESS)3453 {3454 Log(("iemTaskSwitch: failed to fetch SS. 
uNewSS=%#x rc=%Rrc\n", uNewSS,3455 VBOXSTRICTRC_VAL(rcStrict)));3456 return rcStrict;3457 }3458 3459 /* SS must be a data segment and writable. */3460 if ( !DescSS.Legacy.Gen.u1DescType3461 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)3462 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))3463 {3464 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",3465 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));3466 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3467 }3468 3469 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */3470 if ( (uNewSS & X86_SEL_RPL) != uNewCpl3471 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)3472 {3473 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,3474 uNewCpl));3475 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3476 }3477 3478 /* Is it there? */3479 if (!DescSS.Legacy.Gen.u1Present)3480 {3481 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));3482 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);3483 }3484 3485 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);3486 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);3487 3488 /* Set the accessed bit before committing the result into SS. */3489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3490 {3491 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);3492 if (rcStrict != VINF_SUCCESS)3493 return rcStrict;3494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3495 }3496 3497 /* Commit SS. */3498 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;3499 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;3500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);3501 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;3502 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;3503 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;3504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));3505 3506 /* CPL has changed, update IEM before loading rest of segments. */3507 IEM_SET_CPL(pVCpu, uNewCpl);3508 3509 /*3510 * Load the data segments for the new task.3511 */3512 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);3513 if (rcStrict != VINF_SUCCESS)3514 return rcStrict;3515 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);3516 if (rcStrict != VINF_SUCCESS)3517 return rcStrict;3518 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);3519 if (rcStrict != VINF_SUCCESS)3520 return rcStrict;3521 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);3522 if (rcStrict != VINF_SUCCESS)3523 return rcStrict;3524 3525 /*3526 * Load the code segment for the new task.3527 */3528 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))3529 {3530 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));3531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3532 }3533 3534 /* Fetch the descriptor. */3535 IEMSELDESC DescCS;3536 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);3537 if (rcStrict != VINF_SUCCESS)3538 {3539 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));3540 return rcStrict;3541 }3542 3543 /* CS must be a code segment. 
*/3544 if ( !DescCS.Legacy.Gen.u1DescType3545 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3546 {3547 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,3548 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));3549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3550 }3551 3552 /* For conforming CS, DPL must be less than or equal to the RPL. */3553 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3554 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))3555 {3556 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,3557 DescCS.Legacy.Gen.u2Dpl));3558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3559 }3560 3561 /* For non-conforming CS, DPL must match RPL. */3562 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)3563 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))3564 {3565 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,3566 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));3567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3568 }3569 3570 /* Is it there? */3571 if (!DescCS.Legacy.Gen.u1Present)3572 {3573 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));3574 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);3575 }3576 3577 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);3578 u64Base = X86DESC_BASE(&DescCS.Legacy);3579 3580 /* Set the accessed bit before committing the result into CS. */3581 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))3582 {3583 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);3584 if (rcStrict != VINF_SUCCESS)3585 return rcStrict;3586 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;3587 }3588 3589 /* Commit CS. */3590 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;3591 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;3592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);3593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;3594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;3595 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;3596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));3597 }3598 3599 /* Make sure the CPU mode is correct. */3600 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);3601 if (fExecNew != pVCpu->iem.s.fExec)3602 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));3603 pVCpu->iem.s.fExec = fExecNew;3604 3605 /** @todo Debug trap. */3606 if (fIsNewTss386 && fNewDebugTrap)3607 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));3608 3609 /*3610 * Construct the error code masks based on what caused this task switch.3611 * See Intel Instruction reference for INT.3612 */3613 uint16_t uExt;3614 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT3615 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3616 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))3617 uExt = 1;3618 else3619 uExt = 0;3620 3621 /*3622 * Push any error code on to the new stack.3623 */3624 if (fFlags & IEM_XCPT_FLAGS_ERR)3625 {3626 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);3627 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3628 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;3629 3630 /* Check that there is sufficient space on the stack. 
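
/*
 * For reference, a stand-alone sketch of the CS privilege rule checked above:
 * a conforming code segment may be entered as long as its DPL does not exceed
 * the selector's RPL, while a non-conforming one requires DPL == RPL; a
 * violation raises #TS with the CS selector as error code.  Constant and
 * function names below are illustrative stand-ins, not VBox definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_SEL_TYPE_CONF  0x4U   /* conforming bit within the 4-bit code-segment type */

/* Returns true if the new CS passes the DPL/RPL check, false if it should fault. */
static bool sketchIsCsDplValid(uint8_t fSegType, uint8_t uDpl, uint16_t uSelCS)
{
    uint8_t const uRpl = uSelCS & 3;    /* RPL is the low two bits of the selector */
    if (fSegType & SKETCH_SEL_TYPE_CONF)
        return uDpl <= uRpl;            /* conforming: DPL must not exceed RPL */
    return uDpl == uRpl;                /* non-conforming: privilege levels must match */
}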
*/3631 /** @todo Factor out segment limit checking for normal/expand down segments3632 * into a separate function. */3633 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3634 {3635 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS3636 || pVCpu->cpum.GstCtx.esp < cbStackFrame)3637 {3638 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3639 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",3640 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3641 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3642 }3643 }3644 else3645 {3646 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))3647 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))3648 {3649 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",3650 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));3651 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);3652 }3653 }3654 3655 3656 if (fIsNewTss386)3657 rcStrict = iemMemStackPushU32(pVCpu, uErr);3658 else3659 rcStrict = iemMemStackPushU16(pVCpu, uErr);3660 if (rcStrict != VINF_SUCCESS)3661 {3662 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",3663 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));3664 return rcStrict;3665 }3666 }3667 3668 /* Check the new EIP against the new CS limit. */3669 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)3670 {3671 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",3672 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));3673 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */3674 return iemRaiseGeneralProtectionFault(pVCpu, uExt);3675 }3676 3677 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,3678 pVCpu->cpum.GstCtx.ss.Sel));3679 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;3680 }3681 3682 3683 /**3684 * Implements exceptions and interrupts for protected mode.3685 *3686 * @returns VBox strict status code.3687 * @param pVCpu The cross context virtual CPU structure of the calling thread.3688 * @param cbInstr The number of bytes to offset rIP by in the return3689 * address.3690 * @param u8Vector The interrupt / exception vector number.3691 * @param fFlags The flags.3692 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.3693 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.3694 */3695 static VBOXSTRICTRC3696 iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,3697 uint8_t cbInstr,3698 uint8_t u8Vector,3699 uint32_t fFlags,3700 uint16_t uErr,3701 uint64_t uCr2) RT_NOEXCEPT3702 {3703 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);3704 3705 /*3706 * Read the IDT entry.3707 */3708 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)3709 {3710 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));3711 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3712 }3713 X86DESC Idte;3714 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,3715 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);3716 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))3717 {3718 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! 
vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));3719 return rcStrict;3720 }3721 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",3722 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,3723 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,3724 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));3725 3726 /*3727 * Check the descriptor type, DPL and such.3728 * ASSUMES this is done in the same order as described for call-gate calls.3729 */3730 if (Idte.Gate.u1DescType)3731 {3732 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3733 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3734 }3735 bool fTaskGate = false;3736 uint8_t f32BitGate = true;3737 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;3738 switch (Idte.Gate.u4Type)3739 {3740 case X86_SEL_TYPE_SYS_UNDEFINED:3741 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:3742 case X86_SEL_TYPE_SYS_LDT:3743 case X86_SEL_TYPE_SYS_286_TSS_BUSY:3744 case X86_SEL_TYPE_SYS_286_CALL_GATE:3745 case X86_SEL_TYPE_SYS_UNDEFINED2:3746 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:3747 case X86_SEL_TYPE_SYS_UNDEFINED3:3748 case X86_SEL_TYPE_SYS_386_TSS_BUSY:3749 case X86_SEL_TYPE_SYS_386_CALL_GATE:3750 case X86_SEL_TYPE_SYS_UNDEFINED4:3751 {3752 /** @todo check what actually happens when the type is wrong...3753 * esp. call gates. */3754 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));3755 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3756 }3757 3758 case X86_SEL_TYPE_SYS_286_INT_GATE:3759 f32BitGate = false;3760 RT_FALL_THRU();3761 case X86_SEL_TYPE_SYS_386_INT_GATE:3762 fEflToClear |= X86_EFL_IF;3763 break;3764 3765 case X86_SEL_TYPE_SYS_TASK_GATE:3766 fTaskGate = true;3767 #ifndef IEM_IMPLEMENTS_TASKSWITCH3768 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));3769 #endif3770 break;3771 3772 case X86_SEL_TYPE_SYS_286_TRAP_GATE:3773 f32BitGate = false;3774 break;3775 case X86_SEL_TYPE_SYS_386_TRAP_GATE:3776 break;3777 3778 IEM_NOT_REACHED_DEFAULT_CASE_RET();3779 }3780 3781 /* Check DPL against CPL if applicable. */3782 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)3783 {3784 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)3785 {3786 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));3787 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3788 }3789 }3790 3791 /* Is it there? */3792 if (!Idte.Gate.u1Present)3793 {3794 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));3795 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));3796 }3797 3798 /* Is it a task-gate? */3799 if (fTaskGate)3800 {3801 /*3802 * Construct the error code masks based on what caused this task switch.3803 * See Intel Instruction reference for INT.3804 */3805 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3806 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 
0 : 1;3807 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;3808 RTSEL SelTss = Idte.Gate.u16Sel;3809 3810 /*3811 * Fetch the TSS descriptor in the GDT.3812 */3813 IEMSELDESC DescTSS;3814 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);3815 if (rcStrict != VINF_SUCCESS)3816 {3817 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,3818 VBOXSTRICTRC_VAL(rcStrict)));3819 return rcStrict;3820 }3821 3822 /* The TSS descriptor must be a system segment and be available (not busy). */3823 if ( DescTSS.Legacy.Gen.u1DescType3824 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL3825 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))3826 {3827 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",3828 u8Vector, SelTss, DescTSS.Legacy.au64));3829 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);3830 }3831 3832 /* The TSS must be present. */3833 if (!DescTSS.Legacy.Gen.u1Present)3834 {3835 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));3836 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);3837 }3838 3839 /* Do the actual task switch. */3840 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,3841 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,3842 fFlags, uErr, uCr2, SelTss, &DescTSS);3843 }3844 3845 /* A null CS is bad. */3846 RTSEL NewCS = Idte.Gate.u16Sel;3847 if (!(NewCS & X86_SEL_MASK_OFF_RPL))3848 {3849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));3850 return iemRaiseGeneralProtectionFault0(pVCpu);3851 }3852 3853 /* Fetch the descriptor for the new CS. */3854 IEMSELDESC DescCS;3855 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */3856 if (rcStrict != VINF_SUCCESS)3857 {3858 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));3859 return rcStrict;3860 }3861 3862 /* Must be a code segment. */3863 if (!DescCS.Legacy.Gen.u1DescType)3864 {3865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3866 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3867 }3868 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))3869 {3870 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));3871 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3872 }3873 3874 /* Don't allow lowering the privilege level. */3875 /** @todo Does the lowering of privileges apply to software interrupts3876 * only? This has bearings on the more-privileged or3877 * same-privilege stack behavior further down. A testcase would3878 * be nice. */3879 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))3880 {3881 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",3882 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));3883 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);3884 }3885 3886 /* Make sure the selector is present. 
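
/*
 * For reference, the selector error codes built above (the uExt and
 * X86_TRAP_ERR_IDT / X86_TRAP_ERR_SEL_SHIFT pieces) follow the architectural
 * layout: bit 0 = EXT (fault occurred while delivering an external event),
 * bit 1 = IDT (the index is an interrupt vector), bit 2 = TI (LDT rather than
 * GDT, only meaningful when the IDT bit is clear), bits 3..15 = index.
 * The helper below is an illustrative stand-in, not a VBox function.
 */
#include <stdint.h>

static uint16_t sketchMakeSelErrorCode(uint16_t uIndex, int fIdt, int fLdt, int fExt)
{
    return (uint16_t)(  (uint16_t)(uIndex << 3)   /* selector or vector index */
                      | (fLdt ? 1U << 2 : 0)      /* TI */
                      | (fIdt ? 1U << 1 : 0)      /* IDT */
                      | (fExt ? 1U << 0 : 0));    /* EXT */
}
/* Example: sketchMakeSelErrorCode(0x2a, 1, 0, 1) == 0x153 for an externally
   delivered vector 0x2a that references the IDT. */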
*/3887 if (!DescCS.Legacy.Gen.u1Present)3888 {3889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));3890 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);3891 }3892 3893 #ifdef LOG_ENABLED3894 /* If software interrupt, try decode it if logging is enabled and such. */3895 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)3896 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))3897 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);3898 #endif3899 3900 /* Check the new EIP against the new CS limit. */3901 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE3902 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE3903 ? Idte.Gate.u16OffsetLow3904 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);3905 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);3906 if (uNewEip > cbLimitCS)3907 {3908 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",3909 u8Vector, uNewEip, cbLimitCS, NewCS));3910 return iemRaiseGeneralProtectionFault(pVCpu, 0);3911 }3912 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));3913 3914 /* Calc the flag image to push. */3915 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);3916 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))3917 fEfl &= ~X86_EFL_RF;3918 else3919 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */3920 3921 /* From V8086 mode only go to CPL 0. */3922 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF3923 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;3924 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */3925 {3926 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));3927 return iemRaiseGeneralProtectionFault(pVCpu, 0);3928 }3929 3930 /*3931 * If the privilege level changes, we need to get a new stack from the TSS.3932 * This in turns means validating the new SS and ESP...3933 */3934 if (uNewCpl != IEM_GET_CPL(pVCpu))3935 {3936 RTSEL NewSS;3937 uint32_t uNewEsp;3938 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);3939 if (rcStrict != VINF_SUCCESS)3940 return rcStrict;3941 3942 IEMSELDESC DescSS;3943 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);3944 if (rcStrict != VINF_SUCCESS)3945 return rcStrict;3946 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */3947 if (!DescSS.Legacy.Gen.u1DefBig)3948 {3949 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));3950 uNewEsp = (uint16_t)uNewEsp;3951 }3952 3953 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));3954 3955 /* Check that there is sufficient space for the stack frame. */3956 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);3957 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)3958 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate3959 : (fFlags & IEM_XCPT_FLAGS_ERR ? 
20 : 18) << f32BitGate;3960 3961 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))3962 {3963 if ( uNewEsp - 1 > cbLimitSS3964 || uNewEsp < cbStackFrame)3965 {3966 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",3967 u8Vector, NewSS, uNewEsp, cbStackFrame));3968 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3969 }3970 }3971 else3972 {3973 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)3974 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))3975 {3976 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",3977 u8Vector, NewSS, uNewEsp, cbStackFrame));3978 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);3979 }3980 }3981 3982 /*3983 * Start making changes.3984 */3985 3986 /* Set the new CPL so that stack accesses use it. */3987 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);3988 IEM_SET_CPL(pVCpu, uNewCpl);3989 3990 /* Create the stack frame. */3991 uint8_t bUnmapInfoStackFrame;3992 RTPTRUNION uStackFrame;3993 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,3994 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),3995 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */3996 if (rcStrict != VINF_SUCCESS)3997 return rcStrict;3998 if (f32BitGate)3999 {4000 if (fFlags & IEM_XCPT_FLAGS_ERR)4001 *uStackFrame.pu32++ = uErr;4002 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4003 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4004 uStackFrame.pu32[2] = fEfl;4005 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;4006 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;4007 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));4008 if (fEfl & X86_EFL_VM)4009 {4010 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;4011 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;4012 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;4013 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;4014 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;4015 }4016 }4017 else4018 {4019 if (fFlags & IEM_XCPT_FLAGS_ERR)4020 *uStackFrame.pu16++ = uErr;4021 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;4022 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;4023 uStackFrame.pu16[2] = fEfl;4024 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;4025 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;4026 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));4027 if (fEfl & X86_EFL_VM)4028 {4029 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;4030 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;4031 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;4032 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;4033 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;4034 }4035 }4036 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);4037 if (rcStrict != VINF_SUCCESS)4038 return rcStrict;4039 4040 /* Mark the selectors 'accessed' (hope this is the correct time). */4041 /** @todo testcase: excatly _when_ are the accessed bits set - before or4042 * after pushing the stack frame? (Write protect the gdt + stack to4043 * find out.) 
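
/*
 * For reference, the inter-privilege frame being written above for a 32-bit
 * gate has the following layout, lowest address first (20 or 24 bytes, growing
 * to 36/40 bytes when interrupting V8086 code); the 16-bit gate variant is the
 * same sequence with 16-bit slots.  The struct is an illustration only.
 */
#include <stdint.h>

typedef struct XCPTFRAME32SKETCH
{
    uint32_t uErrCd;                /* only for vectors that push an error code */
    uint32_t uEip;                  /* return EIP (past INT n for software interrupts) */
    uint32_t uCs;                   /* return CS in the low 16 bits */
    uint32_t uEflags;               /* caller EFLAGS image (RF handling as above) */
    uint32_t uEsp;                  /* old ESP - pushed because the CPL changed */
    uint32_t uSs;                   /* old SS  - pushed because the CPL changed */
    uint32_t uEs, uDs, uFs, uGs;    /* only pushed when the caller was in V8086 mode */
} XCPTFRAME32SKETCH;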
*/4044 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4045 {4046 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4047 if (rcStrict != VINF_SUCCESS)4048 return rcStrict;4049 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4050 }4051 4052 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4053 {4054 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);4055 if (rcStrict != VINF_SUCCESS)4056 return rcStrict;4057 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4058 }4059 4060 /*4061 * Start comitting the register changes (joins with the DPL=CPL branch).4062 */4063 pVCpu->cpum.GstCtx.ss.Sel = NewSS;4064 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;4065 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;4066 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;4067 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);4068 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);4069 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and4070 * 16-bit handler, the high word of ESP remains unchanged (i.e. only4071 * SP is loaded).4072 * Need to check the other combinations too:4073 * - 16-bit TSS, 32-bit handler4074 * - 32-bit TSS, 16-bit handler */4075 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)4076 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);4077 else4078 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;4079 4080 if (fEfl & X86_EFL_VM)4081 {4082 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);4083 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);4084 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);4085 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);4086 }4087 }4088 /*4089 * Same privilege, no stack change and smaller stack frame.4090 */4091 else4092 {4093 uint64_t uNewRsp;4094 uint8_t bUnmapInfoStackFrame;4095 RTPTRUNION uStackFrame;4096 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;4097 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,4098 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);4099 if (rcStrict != VINF_SUCCESS)4100 return rcStrict;4101 4102 if (f32BitGate)4103 {4104 if (fFlags & IEM_XCPT_FLAGS_ERR)4105 *uStackFrame.pu32++ = uErr;4106 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4107 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);4108 uStackFrame.pu32[2] = fEfl;4109 }4110 else4111 {4112 if (fFlags & IEM_XCPT_FLAGS_ERR)4113 *uStackFrame.pu16++ = uErr;4114 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;4115 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);4116 uStackFrame.pu16[2] = fEfl;4117 }4118 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */4119 if (rcStrict != VINF_SUCCESS)4120 return rcStrict;4121 4122 /* Mark the CS selector as 'accessed'. */4123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4124 {4125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4126 if (rcStrict != VINF_SUCCESS)4127 return rcStrict;4128 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4129 }4130 4131 /*4132 * Start committing the register changes (joins with the other branch).4133 */4134 pVCpu->cpum.GstCtx.rsp = uNewRsp;4135 }4136 4137 /* ... register committing continues. 
*/4138 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4139 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4140 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;4141 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;4142 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);4143 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);4144 4145 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */4146 fEfl &= ~fEflToClear;4147 IEMMISC_SET_EFL(pVCpu, fEfl);4148 4149 if (fFlags & IEM_XCPT_FLAGS_CR2)4150 pVCpu->cpum.GstCtx.cr2 = uCr2;4151 4152 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)4153 iemRaiseXcptAdjustState(pVCpu, u8Vector);4154 4155 /* Make sure the execution flags are correct. */4156 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);4157 if (fExecNew != pVCpu->iem.s.fExec)4158 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",4159 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));4160 pVCpu->iem.s.fExec = fExecNew;4161 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);4162 4163 /*4164 * Deal with debug events that follows the exception and clear inhibit flags.4165 */4166 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4167 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))4168 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4169 else4170 {4171 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",4172 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));4173 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);4174 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4175 >> CPUMCTX_DBG_HIT_DRX_SHIFT;4176 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4177 return iemRaiseDebugException(pVCpu);4178 }4179 4180 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4181 }4182 4183 4184 /**4185 * Implements exceptions and interrupts for long mode.4186 *4187 * @returns VBox strict status code.4188 * @param pVCpu The cross context virtual CPU structure of the calling thread.4189 * @param cbInstr The number of bytes to offset rIP by in the return4190 * address.4191 * @param u8Vector The interrupt / exception vector number.4192 * @param fFlags The flags.4193 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4194 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4195 */4196 static VBOXSTRICTRC4197 iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,4198 uint8_t cbInstr,4199 uint8_t u8Vector,4200 uint32_t fFlags,4201 uint16_t uErr,4202 uint64_t uCr2) RT_NOEXCEPT4203 {4204 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4205 4206 /*4207 * Read the IDT entry.4208 */4209 uint16_t offIdt = (uint16_t)u8Vector << 4;4210 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)4211 {4212 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));4213 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4214 }4215 X86DESC64 Idte;4216 #ifdef _MSC_VER /* Shut up silly compiler warning. 
*/4217 Idte.au64[0] = 0;4218 Idte.au64[1] = 0;4219 #endif4220 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);4221 if (RT_LIKELY(rcStrict == VINF_SUCCESS))4222 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);4223 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))4224 {4225 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));4226 return rcStrict;4227 }4228 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",4229 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,4230 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));4231 4232 /*4233 * Check the descriptor type, DPL and such.4234 * ASSUMES this is done in the same order as described for call-gate calls.4235 */4236 if (Idte.Gate.u1DescType)4237 {4238 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4239 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4240 }4241 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;4242 switch (Idte.Gate.u4Type)4243 {4244 case AMD64_SEL_TYPE_SYS_INT_GATE:4245 fEflToClear |= X86_EFL_IF;4246 break;4247 case AMD64_SEL_TYPE_SYS_TRAP_GATE:4248 break;4249 4250 default:4251 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));4252 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4253 }4254 4255 /* Check DPL against CPL if applicable. */4256 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)4257 {4258 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)4259 {4260 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));4261 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4262 }4263 }4264 4265 /* Is it there? */4266 if (!Idte.Gate.u1Present)4267 {4268 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));4269 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));4270 }4271 4272 /* A null CS is bad. */4273 RTSEL NewCS = Idte.Gate.u16Sel;4274 if (!(NewCS & X86_SEL_MASK_OFF_RPL))4275 {4276 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));4277 return iemRaiseGeneralProtectionFault0(pVCpu);4278 }4279 4280 /* Fetch the descriptor for the new CS. */4281 IEMSELDESC DescCS;4282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);4283 if (rcStrict != VINF_SUCCESS)4284 {4285 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));4286 return rcStrict;4287 }4288 4289 /* Must be a 64-bit code segment. 
*/4290 if (!DescCS.Long.Gen.u1DescType)4291 {4292 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));4293 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4294 }4295 if ( !DescCS.Long.Gen.u1Long4296 || DescCS.Long.Gen.u1DefBig4297 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )4298 {4299 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",4300 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));4301 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4302 }4303 4304 /* Don't allow lowering the privilege level. For non-conforming CS4305 selectors, the CS.DPL sets the privilege level the trap/interrupt4306 handler runs at. For conforming CS selectors, the CPL remains4307 unchanged, but the CS.DPL must be <= CPL. */4308 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched4309 * when CPU in Ring-0. Result \#GP? */4310 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))4311 {4312 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",4313 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));4314 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);4315 }4316 4317 4318 /* Make sure the selector is present. */4319 if (!DescCS.Legacy.Gen.u1Present)4320 {4321 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));4322 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);4323 }4324 4325 /* Check that the new RIP is canonical. */4326 uint64_t const uNewRip = Idte.Gate.u16OffsetLow4327 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)4328 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);4329 if (!IEM_IS_CANONICAL(uNewRip))4330 {4331 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));4332 return iemRaiseGeneralProtectionFault0(pVCpu);4333 }4334 4335 /*4336 * If the privilege level changes or if the IST isn't zero, we need to get4337 * a new stack from the TSS.4338 */4339 uint64_t uNewRsp;4340 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF4341 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;4342 if ( uNewCpl != IEM_GET_CPL(pVCpu)4343 || Idte.Gate.u3IST != 0)4344 {4345 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);4346 if (rcStrict != VINF_SUCCESS)4347 return rcStrict;4348 }4349 else4350 uNewRsp = pVCpu->cpum.GstCtx.rsp;4351 uNewRsp &= ~(uint64_t)0xf;4352 4353 /*4354 * Calc the flag image to push.4355 */4356 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);4357 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))4358 fEfl &= ~X86_EFL_RF;4359 else4360 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */4361 4362 /*4363 * Start making changes.4364 */4365 /* Set the new CPL so that stack accesses use it. */4366 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);4367 IEM_SET_CPL(pVCpu, uNewCpl);4368 /** @todo Setting CPL this early seems wrong as it would affect and errors we4369 * raise accessing the stack and (?) GDT/LDT... */4370 4371 /* Create the stack frame. 
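
/*
 * For reference, long-mode IDT entries are 16 bytes, which is why the code
 * above fetches two quadwords per vector (offIdt = vector * 16) and assembles
 * the target RIP from three offset fields.  A simplified layout sketch
 * (bit-field allocation assumes a typical little-endian x86 ABI); the type and
 * helper below are illustrations, not VBox definitions:
 */
#include <stdint.h>

typedef struct IDTGATE64SKETCH
{
    uint16_t uOffsetLow;        /* target RIP bits 15:0 */
    uint16_t uSel;              /* target selector, must name a 64-bit code segment */
    uint16_t u3Ist     : 3;     /* interrupt stack table slot, 0 = normal stack rules */
    uint16_t u5Zero    : 5;
    uint16_t u4Type    : 4;     /* 0xE = interrupt gate (clears IF), 0xF = trap gate */
    uint16_t u1Zero    : 1;
    uint16_t u2Dpl     : 2;     /* checked against CPL for software INT n */
    uint16_t u1Present : 1;
    uint16_t uOffsetHigh;       /* target RIP bits 31:16 */
    uint32_t uOffsetTop;        /* target RIP bits 63:32 */
    uint32_t uReserved;
} IDTGATE64SKETCH;

/* Reassemble the 64-bit entry point the same way the code above does. */
static uint64_t sketchIdtGate64GetRip(IDTGATE64SKETCH const *pGate)
{
    return (uint64_t)pGate->uOffsetLow
         | ((uint64_t)pGate->uOffsetHigh << 16)
         | ((uint64_t)pGate->uOffsetTop  << 32);
}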
*/4372 uint8_t bUnmapInfoStackFrame;4373 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));4374 RTPTRUNION uStackFrame;4375 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,4376 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */4377 if (rcStrict != VINF_SUCCESS)4378 return rcStrict;4379 4380 if (fFlags & IEM_XCPT_FLAGS_ERR)4381 *uStackFrame.pu64++ = uErr;4382 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;4383 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */4384 uStackFrame.pu64[2] = fEfl;4385 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;4386 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;4387 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);4388 if (rcStrict != VINF_SUCCESS)4389 return rcStrict;4390 4391 /* Mark the CS selectors 'accessed' (hope this is the correct time). */4392 /** @todo testcase: excatly _when_ are the accessed bits set - before or4393 * after pushing the stack frame? (Write protect the gdt + stack to4394 * find out.) */4395 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))4396 {4397 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);4398 if (rcStrict != VINF_SUCCESS)4399 return rcStrict;4400 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;4401 }4402 4403 /*4404 * Start comitting the register changes.4405 */4406 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the4407 * hidden registers when interrupting 32-bit or 16-bit code! */4408 if (uNewCpl != uOldCpl)4409 {4410 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;4411 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;4412 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;4413 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;4414 pVCpu->cpum.GstCtx.ss.u64Base = 0;4415 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;4416 }4417 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;4418 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4419 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;4420 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;4421 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);4422 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);4423 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);4424 pVCpu->cpum.GstCtx.rip = uNewRip;4425 4426 fEfl &= ~fEflToClear;4427 IEMMISC_SET_EFL(pVCpu, fEfl);4428 4429 if (fFlags & IEM_XCPT_FLAGS_CR2)4430 pVCpu->cpum.GstCtx.cr2 = uCr2;4431 4432 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)4433 iemRaiseXcptAdjustState(pVCpu, u8Vector);4434 4435 iemRecalcExecModeAndCplAndAcFlags(pVCpu);4436 4437 /*4438 * Deal with debug events that follows the exception and clear inhibit flags.4439 */4440 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4441 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))4442 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);4443 else4444 {4445 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",4446 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));4447 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);4448 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4449 >> CPUMCTX_DBG_HIT_DRX_SHIFT;4450 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | 
CPUMCTX_INHIBIT_SHADOW);4451 return iemRaiseDebugException(pVCpu);4452 }4453 4454 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;4455 }4456 4457 4458 /**4459 * Implements exceptions and interrupts.4460 *4461 * All exceptions and interrupts goes thru this function!4462 *4463 * @returns VBox strict status code.4464 * @param pVCpu The cross context virtual CPU structure of the calling thread.4465 * @param cbInstr The number of bytes to offset rIP by in the return4466 * address.4467 * @param u8Vector The interrupt / exception vector number.4468 * @param fFlags The flags.4469 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.4470 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.4471 */4472 VBOXSTRICTRC4473 iemRaiseXcptOrInt(PVMCPUCC pVCpu,4474 uint8_t cbInstr,4475 uint8_t u8Vector,4476 uint32_t fFlags,4477 uint16_t uErr,4478 uint64_t uCr2) RT_NOEXCEPT4479 {4480 /*4481 * Get all the state that we might need here.4482 */4483 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4484 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);4485 4486 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */4487 /*4488 * Flush prefetch buffer4489 */4490 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;4491 #endif4492 4493 /*4494 * Perform the V8086 IOPL check and upgrade the fault without nesting.4495 */4496 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM4497 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 34498 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT4499 | IEM_XCPT_FLAGS_BP_INSTR4500 | IEM_XCPT_FLAGS_ICEBP_INSTR4501 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT4502 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )4503 {4504 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));4505 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4506 u8Vector = X86_XCPT_GP;4507 uErr = 0;4508 }4509 4510 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);4511 #ifdef DBGFTRACE_ENABLED4512 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",4513 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,4514 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);4515 #endif4516 4517 /*4518 * Check if DBGF wants to intercept the exception.4519 */4520 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))4521 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )4522 { /* likely */ }4523 else4524 {4525 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),4526 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);4527 if (rcStrict != VINF_SUCCESS)4528 return rcStrict;4529 }4530 4531 /*4532 * Evaluate whether NMI blocking should be in effect.4533 * Normally, NMI blocking is in effect whenever we inject an NMI.4534 */4535 bool fBlockNmi = u8Vector == X86_XCPT_NMI4536 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);4537 4538 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4539 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4540 {4541 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);4542 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4543 return rcStrict0;4544 4545 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. 
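
/*
 * For reference, the V8086 check above implements the rule that a software
 * INT n executed in virtual-8086 mode with IOPL < 3 never reaches the IDT and
 * is converted to #GP(0) instead; INT3, INTO and ICEBP are exempt, as the flag
 * mask shows.  A stand-alone sketch of the condition (names are illustrative):
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_EFL_VM    (1U << 17)   /* EFLAGS.VM, virtual-8086 mode */
#define SKETCH_EFL_IOPL  (3U << 12)   /* EFLAGS.IOPL field */

/* True if a software INT n must be turned into #GP(0) rather than delivered. */
static bool sketchMustUpgradeIntNToGp(uint32_t fEflags, bool fProtectedMode /* CR0.PE */)
{
    return fProtectedMode
        && (fEflags & SKETCH_EFL_VM) != 0
        && ((fEflags & SKETCH_EFL_IOPL) >> 12) != 3;
}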
*/4546 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)4547 {4548 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));4549 fBlockNmi = false;4550 }4551 }4552 #endif4553 4554 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM4555 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))4556 {4557 /*4558 * If the event is being injected as part of VMRUN, it isn't subject to event4559 * intercepts in the nested-guest. However, secondary exceptions that occur4560 * during injection of any event -are- subject to exception intercepts.4561 *4562 * See AMD spec. 15.20 "Event Injection".4563 */4564 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)4565 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;4566 else4567 {4568 /*4569 * Check and handle if the event being raised is intercepted.4570 */4571 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4572 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)4573 return rcStrict0;4574 }4575 }4576 #endif4577 4578 /*4579 * Set NMI blocking if necessary.4580 */4581 if (fBlockNmi)4582 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);4583 4584 /*4585 * Do recursion accounting.4586 */4587 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;4588 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;4589 if (pVCpu->iem.s.cXcptRecursions == 0)4590 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",4591 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));4592 else4593 {4594 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",4595 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,4596 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));4597 4598 if (pVCpu->iem.s.cXcptRecursions >= 4)4599 {4600 #ifdef DEBUG_bird4601 AssertFailed();4602 #endif4603 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));4604 }4605 4606 /*4607 * Evaluate the sequence of recurring events.4608 */4609 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,4610 NULL /* pXcptRaiseInfo */);4611 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)4612 { /* likely */ }4613 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)4614 {4615 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));4616 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;4617 u8Vector = X86_XCPT_DF;4618 uErr = 0;4619 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX4620 /* VMX nested-guest #DF intercept needs to be checked here. */4621 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))4622 {4623 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);4624 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)4625 return rcStrict0;4626 }4627 #endif4628 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */4629 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))4630 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);4631 }4632 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)4633 {4634 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));4635 return iemInitiateCpuShutdown(pVCpu);4636 }4637 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)4638 {4639 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. 
*/4640 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));4641 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))4642 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))4643 return VERR_EM_GUEST_CPU_HANG;4644 }4645 else4646 {4647 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",4648 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));4649 return VERR_IEM_IPE_9;4650 }4651 4652 /*4653 * The 'EXT' bit is set when an exception occurs during deliver of an external4654 * event (such as an interrupt or earlier exception)[1]. Privileged software4655 * exception (INT1) also sets the EXT bit[2]. Exceptions generated by software4656 * interrupts and INTO, INT3 instructions, the 'EXT' bit will not be set.4657 *4658 * [1] - Intel spec. 6.13 "Error Code"4659 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".4660 * [3] - Intel Instruction reference for INT n.4661 */4662 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))4663 && (fFlags & IEM_XCPT_FLAGS_ERR)4664 && u8Vector != X86_XCPT_PF4665 && u8Vector != X86_XCPT_DF)4666 {4667 uErr |= X86_TRAP_ERR_EXTERNAL;4668 }4669 }4670 4671 pVCpu->iem.s.cXcptRecursions++;4672 pVCpu->iem.s.uCurXcpt = u8Vector;4673 pVCpu->iem.s.fCurXcpt = fFlags;4674 pVCpu->iem.s.uCurXcptErr = uErr;4675 pVCpu->iem.s.uCurXcptCr2 = uCr2;4676 4677 /*4678 * Extensive logging.4679 */4680 #if defined(LOG_ENABLED) && defined(IN_RING3)4681 if (LogIs3Enabled())4682 {4683 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);4684 char szRegs[4096];4685 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),4686 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"4687 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"4688 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"4689 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"4690 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"4691 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"4692 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"4693 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"4694 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"4695 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"4696 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"4697 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"4698 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"4699 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"4700 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"4701 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"4702 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"4703 " efer=%016VR{efer}\n"4704 " pat=%016VR{pat}\n"4705 " sf_mask=%016VR{sf_mask}\n"4706 "krnl_gs_base=%016VR{krnl_gs_base}\n"4707 " lstar=%016VR{lstar}\n"4708 " star=%016VR{star} cstar=%016VR{cstar}\n"4709 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"4710 );4711 4712 char szInstr[256];4713 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,4714 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,4715 
szInstr, sizeof(szInstr), NULL);4716 Log3(("%s%s\n", szRegs, szInstr));4717 }4718 #endif /* LOG_ENABLED */4719 4720 /*4721 * Stats.4722 */4723 uint64_t const uTimestamp = ASMReadTSC();4724 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))4725 {4726 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });4727 EMHistoryAddExit(pVCpu,4728 fFlags & IEM_XCPT_FLAGS_T_EXT_INT4729 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)4730 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),4731 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);4732 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);4733 }4734 else4735 {4736 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))4737 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);4738 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),4739 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);4740 if (fFlags & IEM_XCPT_FLAGS_ERR)4741 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);4742 if (fFlags & IEM_XCPT_FLAGS_CR2)4743 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);4744 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);4745 }4746 4747 /*4748 * Hack alert! Convert incoming debug events to slient on Intel.4749 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.4750 */4751 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)4752 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)4753 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))4754 { /* ignore */ }4755 else4756 {4757 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",4758 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));4759 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)4760 | CPUMCTX_DBG_HIT_DRX_SILENT;4761 }4762 4763 /*4764 * #PF's implies a INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)4765 * to ensure that a stale TLB or paging cache entry will only cause one4766 * spurious #PF.4767 */4768 if ( u8Vector == X86_XCPT_PF4769 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))4770 IEMTlbInvalidatePage(pVCpu, uCr2);4771 4772 /*4773 * Call the mode specific worker function.4774 */4775 VBOXSTRICTRC rcStrict;4776 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))4777 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4778 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)4779 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4780 else4781 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4782 4783 /* Flush the prefetch buffer. */4784 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));4785 4786 /*4787 * Unwind.4788 */4789 pVCpu->iem.s.cXcptRecursions--;4790 pVCpu->iem.s.uCurXcpt = uPrevXcpt;4791 pVCpu->iem.s.fCurXcpt = fPrevXcpt;4792 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",4793 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,4794 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));4795 return rcStrict;4796 }4797 4798 #ifdef IEM_WITH_SETJMP4799 /**4800 * See iemRaiseXcptOrInt. 
Will not return.4801 */4802 DECL_NO_RETURN(void)4803 iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,4804 uint8_t cbInstr,4805 uint8_t u8Vector,4806 uint32_t fFlags,4807 uint16_t uErr,4808 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP4809 {4810 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);4811 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));4812 }4813 #endif4814 4815 4816 /** \#DE - 00. */4817 VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT4818 {4819 if (GCMIsInterceptingXcptDE(pVCpu))4820 {4821 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);4822 if (rc == VINF_SUCCESS)4823 {4824 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));4825 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause a instruction restart */4826 }4827 }4828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4829 }4830 4831 4832 #ifdef IEM_WITH_SETJMP4833 /** \#DE - 00. */4834 DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4835 {4836 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4837 }4838 #endif4839 4840 4841 /** \#DB - 01.4842 * @note This automatically clear DR7.GD. */4843 VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT4844 {4845 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */4846 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;4847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);4848 }4849 4850 4851 /** \#BR - 05. */4852 VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT4853 {4854 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4855 }4856 4857 4858 /** \#UD - 06. */4859 VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT4860 {4861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4862 }4863 4864 4865 #ifdef IEM_WITH_SETJMP4866 /** \#UD - 06. */4867 DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4868 {4869 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4870 }4871 #endif4872 4873 4874 /** \#NM - 07. */4875 VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT4876 {4877 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4878 }4879 4880 4881 #ifdef IEM_WITH_SETJMP4882 /** \#NM - 07. */4883 DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4884 {4885 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);4886 }4887 #endif4888 4889 4890 /** \#TS(err) - 0a. */4891 VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4892 {4893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4894 }4895 4896 4897 /** \#TS(tr) - 0a. */4898 VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT4899 {4900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4901 pVCpu->cpum.GstCtx.tr.Sel, 0);4902 }4903 4904 4905 /** \#TS(0) - 0a. */4906 VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4907 {4908 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4909 0, 0);4910 }4911 4912 4913 /** \#TS(err) - 0a. 
*/4914 VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4915 {4916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4917 uSel & X86_SEL_MASK_OFF_RPL, 0);4918 }4919 4920 4921 /** \#NP(err) - 0b. */4922 VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4923 {4924 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4925 }4926 4927 4928 /** \#NP(sel) - 0b. */4929 VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4930 {4931 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4934 uSel & ~X86_SEL_RPL, 0);4935 }4936 4937 4938 /** \#SS(seg) - 0c. */4939 VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT4940 {4941 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",4942 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));4943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4944 uSel & ~X86_SEL_RPL, 0);4945 }4946 4947 4948 /** \#SS(err) - 0c. */4949 VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4950 {4951 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",4952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4954 }4955 4956 4957 /** \#GP(n) - 0d. */4958 VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT4959 {4960 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));4961 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);4962 }4963 4964 4965 /** \#GP(0) - 0d. */4966 VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT4967 {4968 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4969 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4970 }4971 4972 #ifdef IEM_WITH_SETJMP4973 /** \#GP(0) - 0d. */4974 DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP4975 {4976 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4977 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4978 }4979 #endif4980 4981 4982 /** \#GP(sel) - 0d. */4983 VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT4984 {4985 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",4986 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));4987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,4988 Sel & ~X86_SEL_RPL, 0);4989 }4990 4991 4992 /** \#GP(0) - 0d. 
*/4993 VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT4994 {4995 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));4996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);4997 }4998 4999 5000 /** \#GP(sel) - 0d. */5001 VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5002 {5003 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5004 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5005 NOREF(iSegReg); NOREF(fAccess);5006 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5007 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5008 }5009 5010 #ifdef IEM_WITH_SETJMP5011 /** \#GP(sel) - 0d, longjmp. */5012 DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5013 {5014 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5015 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5016 NOREF(iSegReg); NOREF(fAccess);5017 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,5018 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5019 }5020 #endif5021 5022 /** \#GP(sel) - 0d. */5023 VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT5024 {5025 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5027 NOREF(Sel);5028 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5029 }5030 5031 #ifdef IEM_WITH_SETJMP5032 /** \#GP(sel) - 0d, longjmp. */5033 DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP5034 {5035 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",5036 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));5037 NOREF(Sel);5038 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5039 }5040 #endif5041 5042 5043 /** \#GP(sel) - 0d. */5044 VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT5045 {5046 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",5047 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));5048 NOREF(iSegReg); NOREF(fAccess);5049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5050 }5051 5052 #ifdef IEM_WITH_SETJMP5053 /** \#GP(sel) - 0d, longjmp. */5054 DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP5055 {5056 NOREF(iSegReg); NOREF(fAccess);5057 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5058 }5059 #endif5060 5061 5062 /** \#PF(n) - 0e. 
*/5063 VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT5064 {5065 uint16_t uErr;5066 switch (rc)5067 {5068 case VERR_PAGE_NOT_PRESENT:5069 case VERR_PAGE_TABLE_NOT_PRESENT:5070 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:5071 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:5072 uErr = 0;5073 break;5074 5075 case VERR_RESERVED_PAGE_TABLE_BITS:5076 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;5077 break;5078 5079 default:5080 AssertMsgFailed(("%Rrc\n", rc));5081 RT_FALL_THRU();5082 case VERR_ACCESS_DENIED:5083 uErr = X86_TRAP_PF_P;5084 break;5085 }5086 5087 if (IEM_GET_CPL(pVCpu) == 3)5088 uErr |= X86_TRAP_PF_US;5089 5090 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE5091 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)5092 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )5093 uErr |= X86_TRAP_PF_ID;5094 5095 #if 0 /* This is so much non-sense, really. Why was it done like that? */5096 /* Note! RW access callers reporting a WRITE protection fault, will clear5097 the READ flag before calling. So, read-modify-write accesses (RW)5098 can safely be reported as READ faults. */5099 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)5100 uErr |= X86_TRAP_PF_RW;5101 #else5102 if (fAccess & IEM_ACCESS_TYPE_WRITE)5103 {5104 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg5105 /// (regardless of outcome of the comparison in the latter case).5106 //if (!(fAccess & IEM_ACCESS_TYPE_READ))5107 uErr |= X86_TRAP_PF_RW;5108 }5109 #endif5110 5111 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address5112 of the memory operand rather than at the start of it. (Not sure what5113 happens if it crosses a page boundrary.) The current heuristics for5114 this is to report the #PF for the last byte if the access is more than5115 64 bytes. This is probably not correct, but we can work that out later,5116 main objective now is to get FXSAVE to work like for real hardware and5117 make bs3-cpu-basic2 work. */5118 if (cbAccess <= 64)5119 { /* likely*/ }5120 else5121 GCPtrWhere += cbAccess - 1;5122 5123 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,5124 uErr, GCPtrWhere);5125 }5126 5127 #ifdef IEM_WITH_SETJMP5128 /** \#PF(n) - 0e, longjmp. */5129 DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,5130 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP5131 {5132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));5133 }5134 #endif5135 5136 5137 /** \#MF(0) - 10. */5138 VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT5139 {5140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)5141 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5142 5143 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */5144 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);5145 return iemRegUpdateRipAndFinishClearingRF(pVCpu);5146 }5147 5148 #ifdef IEM_WITH_SETJMP5149 /** \#MF(0) - 10, longjmp. */5150 DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5151 {5152 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));5153 }5154 #endif5155 5156 5157 /** \#AC(0) - 11. 
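For reference, the bits assembled in iemRaisePageFault follow the architectural #PF error-code layout (Intel SDM Vol. 3, Interrupt 14). A self-contained illustration with the bit values spelled out; the helper name and parameters are invented for the example:

#include <cstdint>

static uint16_t iemSketchBuildPfErrCd(bool fProtViolation, bool fWrite, bool fUser,
                                      bool fRsvdBits, bool fInstrFetch)
{
    uint16_t uErr = 0;
    if (fProtViolation) uErr |= UINT16_C(0x01); /* P:    protection violation, not a non-present page      */
    if (fWrite)         uErr |= UINT16_C(0x02); /* W/R:  the access was a write                            */
    if (fUser)          uErr |= UINT16_C(0x04); /* U/S:  CPL was 3                                         */
    if (fRsvdBits)      uErr |= UINT16_C(0x08); /* RSVD: reserved bits set in a paging-structure entry     */
    if (fInstrFetch)    uErr |= UINT16_C(0x10); /* I/D:  instruction fetch (reported with PAE + EFER.NXE)  */
    return uErr;
}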
*/5158 VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT5159 {5160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);5161 }5162 5163 #ifdef IEM_WITH_SETJMP5164 /** \#AC(0) - 11, longjmp. */5165 DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5166 {5167 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));5168 }5169 #endif5170 5171 5172 /** \#XF(0)/\#XM(0) - 19. */5173 VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT5174 {5175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5176 }5177 5178 5179 #ifdef IEM_WITH_SETJMP5180 /** \#XF(0)/\#XM(0) - 19s, longjmp. */5181 DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP5182 {5183 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));5184 }5185 #endif5186 5187 5188 /** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */5189 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)5190 {5191 NOREF(cbInstr);5192 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5193 }5194 5195 5196 /** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */5197 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)5198 {5199 NOREF(cbInstr);5200 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5201 }5202 5203 5204 /** Accessed via IEMOP_RAISE_INVALID_OPCODE. */5205 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)5206 {5207 NOREF(cbInstr);5208 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);5209 }5210 5211 5212 /** @} */5213 5214 /** @name Common opcode decoders.5215 * @{5216 */5217 //#include <iprt/mem.h>5218 5219 /**5220 * Used to add extra details about a stub case.5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.5222 */5223 void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT5224 {5225 #if defined(LOG_ENABLED) && defined(IN_RING3)5226 PVM pVM = pVCpu->CTX_SUFF(pVM);5227 char szRegs[4096];5228 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),5229 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"5230 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"5231 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"5232 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"5233 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"5234 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"5235 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"5236 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"5237 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"5238 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"5239 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"5240 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"5241 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"5242 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"5243 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"5244 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"5245 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} 
esp=%08VR{sysenter_esp}}\n"5246 " efer=%016VR{efer}\n"5247 " pat=%016VR{pat}\n"5248 " sf_mask=%016VR{sf_mask}\n"5249 "krnl_gs_base=%016VR{krnl_gs_base}\n"5250 " lstar=%016VR{lstar}\n"5251 " star=%016VR{star} cstar=%016VR{cstar}\n"5252 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"5253 );5254 5255 char szInstr[256];5256 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,5257 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,5258 szInstr, sizeof(szInstr), NULL);5259 5260 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);5261 #else5262 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);5263 #endif5264 }5265 5266 /** @} */5267 5268 5269 5270 /** @name Register Access.5271 * @{5272 */5273 5274 /**5275 * Adds a 8-bit signed jump offset to RIP/EIP/IP.5276 *5277 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5278 * segment limit.5279 *5280 * @param pVCpu The cross context virtual CPU structure of the calling thread.5281 * @param cbInstr Instruction size.5282 * @param offNextInstr The offset of the next instruction.5283 * @param enmEffOpSize Effective operand size.5284 */5285 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,5286 IEMMODE enmEffOpSize) RT_NOEXCEPT5287 {5288 switch (enmEffOpSize)5289 {5290 case IEMMODE_16BIT:5291 {5292 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;5293 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5294 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))5295 pVCpu->cpum.GstCtx.rip = uNewIp;5296 else5297 return iemRaiseGeneralProtectionFault0(pVCpu);5298 break;5299 }5300 5301 case IEMMODE_32BIT:5302 {5303 Assert(!IEM_IS_64BIT_CODE(pVCpu));5304 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);5305 5306 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;5307 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5308 pVCpu->cpum.GstCtx.rip = uNewEip;5309 else5310 return iemRaiseGeneralProtectionFault0(pVCpu);5311 break;5312 }5313 5314 case IEMMODE_64BIT:5315 {5316 Assert(IEM_IS_64BIT_CODE(pVCpu));5317 5318 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5319 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5320 pVCpu->cpum.GstCtx.rip = uNewRip;5321 else5322 return iemRaiseGeneralProtectionFault0(pVCpu);5323 break;5324 }5325 5326 IEM_NOT_REACHED_DEFAULT_CASE_RET();5327 }5328 5329 #ifndef IEM_WITH_CODE_TLB5330 /* Flush the prefetch buffer. 
*/5331 pVCpu->iem.s.cbOpcode = cbInstr;5332 #endif5333 5334 /*5335 * Clear RF and finish the instruction (maybe raise #DB).5336 */5337 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5338 }5339 5340 5341 /**5342 * Adds a 16-bit signed jump offset to RIP/EIP/IP.5343 *5344 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5345 * segment limit.5346 *5347 * @returns Strict VBox status code.5348 * @param pVCpu The cross context virtual CPU structure of the calling thread.5349 * @param cbInstr Instruction size.5350 * @param offNextInstr The offset of the next instruction.5351 */5352 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT5353 {5354 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);5355 5356 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;5357 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5358 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))5359 pVCpu->cpum.GstCtx.rip = uNewIp;5360 else5361 return iemRaiseGeneralProtectionFault0(pVCpu);5362 5363 #ifndef IEM_WITH_CODE_TLB5364 /* Flush the prefetch buffer. */5365 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5366 #endif5367 5368 /*5369 * Clear RF and finish the instruction (maybe raise #DB).5370 */5371 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5372 }5373 5374 5375 /**5376 * Adds a 32-bit signed jump offset to RIP/EIP/IP.5377 *5378 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5379 * segment limit.5380 *5381 * @returns Strict VBox status code.5382 * @param pVCpu The cross context virtual CPU structure of the calling thread.5383 * @param cbInstr Instruction size.5384 * @param offNextInstr The offset of the next instruction.5385 * @param enmEffOpSize Effective operand size.5386 */5387 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,5388 IEMMODE enmEffOpSize) RT_NOEXCEPT5389 {5390 if (enmEffOpSize == IEMMODE_32BIT)5391 {5392 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));5393 5394 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;5395 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5396 pVCpu->cpum.GstCtx.rip = uNewEip;5397 else5398 return iemRaiseGeneralProtectionFault0(pVCpu);5399 }5400 else5401 {5402 Assert(enmEffOpSize == IEMMODE_64BIT);5403 5404 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5405 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5406 pVCpu->cpum.GstCtx.rip = uNewRip;5407 else5408 return iemRaiseGeneralProtectionFault0(pVCpu);5409 }5410 5411 #ifndef IEM_WITH_CODE_TLB5412 /* Flush the prefetch buffer. 
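The 64-bit branches of these jump helpers hinge on the canonical-address test. For a CPU with 48-bit linear addresses this is a sign-extension check on bit 47; the sketch below is equivalent in spirit to IEM_IS_CANONICAL, though the real macro may be defined differently.

#include <cstdint>

/* Canonical for 48-bit implementations: bits 63:47 must all equal bit 47.
   Adding 2^47 maps the two canonical halves onto [0, 2^48). */
static bool iemSketchIsCanonical48(uint64_t uAddr)
{
    return uAddr + UINT64_C(0x0000800000000000) < UINT64_C(0x0001000000000000);
}

/* Usage pattern mirroring the jump helpers above (caller raises #GP(0) on false). */
static bool iemSketchRipAddOk(uint64_t uRip, uint8_t cbInstr, int64_t offNext, uint64_t *puNewRip)
{
    uint64_t const uNewRip = uRip + cbInstr + (uint64_t)offNext;
    if (!iemSketchIsCanonical48(uNewRip))
        return false;
    *puNewRip = uNewRip;
    return true;
}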
*/5413 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5414 #endif5415 5416 /*5417 * Clear RF and finish the instruction (maybe raise #DB).5418 */5419 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5420 }5421 5422 /** @} */5423 53 5424 54 … … 6116 746 /** @} */ 6117 747 6118 6119 /** @name Memory access.6120 *6121 * @{6122 */6123 6124 #undef LOG_GROUP6125 #define LOG_GROUP LOG_GROUP_IEM_MEM6126 6127 /**6128 * Applies the segment limit, base and attributes.6129 *6130 * This may raise a \#GP or \#SS.6131 *6132 * @returns VBox strict status code.6133 *6134 * @param pVCpu The cross context virtual CPU structure of the calling thread.6135 * @param fAccess The kind of access which is being performed.6136 * @param iSegReg The index of the segment register to apply.6137 * This is UINT8_MAX if none (for IDT, GDT, LDT,6138 * TSS, ++).6139 * @param cbMem The access size.6140 * @param pGCPtrMem Pointer to the guest memory address to apply6141 * segmentation to. Input and output parameter.6142 */6143 VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT6144 {6145 if (iSegReg == UINT8_MAX)6146 return VINF_SUCCESS;6147 6148 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));6149 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);6150 switch (IEM_GET_CPU_MODE(pVCpu))6151 {6152 case IEMMODE_16BIT:6153 case IEMMODE_32BIT:6154 {6155 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;6156 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;6157 6158 if ( pSel->Attr.n.u1Present6159 && !pSel->Attr.n.u1Unusable)6160 {6161 Assert(pSel->Attr.n.u1DescType);6162 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))6163 {6164 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)6165 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )6166 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6167 6168 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6169 {6170 /** @todo CPL check. */6171 }6172 6173 /*6174 * There are two kinds of data selectors, normal and expand down.6175 */6176 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))6177 {6178 if ( GCPtrFirst32 > pSel->u32Limit6179 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */6180 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6181 }6182 else6183 {6184 /*6185 * The upper boundary is defined by the B bit, not the G bit!6186 */6187 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)6188 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))6189 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6190 }6191 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6192 }6193 else6194 {6195 /*6196 * Code selector and usually be used to read thru, writing is6197 * only permitted in real and V8086 mode.6198 */6199 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)6200 || ( (fAccess & IEM_ACCESS_TYPE_READ)6201 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )6202 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )6203 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6204 6205 if ( GCPtrFirst32 > pSel->u32Limit6206 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */6207 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6208 6209 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6210 {6211 /** @todo CPL check. 
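The expand-down branch of iemMemApplySegment is the one that is easy to get backwards: for such segments the limit is the lower bound (exclusive) and the D/B bit selects the upper bound (0xffffffff or 0xffff). A standalone illustration of both data-segment limit checks; this is a hypothetical helper, not VMM code:

#include <cstdint>

/* True if the access [off, off + cb - 1] lies inside the data segment. */
static bool iemSketchCheckDataSegLimit(uint32_t off, uint32_t cb, uint32_t u32Limit,
                                       bool fExpandDown, bool fDefBig /* D/B bit */)
{
    uint32_t const offLast = off + cb - 1;
    if (offLast < off)                          /* 32-bit wrap-around is always out of bounds */
        return false;
    if (!fExpandDown)
        return offLast <= u32Limit;             /* normal data segment: 0 .. limit            */
    uint32_t const uUpper = fDefBig ? UINT32_C(0xffffffff) : UINT32_C(0xffff);
    return off > u32Limit && offLast <= uUpper; /* expand-down: (limit, upper]                */
}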
*/6212 }6213 6214 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6215 }6216 }6217 else6218 return iemRaiseGeneralProtectionFault0(pVCpu);6219 return VINF_SUCCESS;6220 }6221 6222 case IEMMODE_64BIT:6223 {6224 RTGCPTR GCPtrMem = *pGCPtrMem;6225 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)6226 *pGCPtrMem = GCPtrMem + pSel->u64Base;6227 6228 Assert(cbMem >= 1);6229 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))6230 return VINF_SUCCESS;6231 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.6232 * 4.12.2 "Data Limit Checks in 64-bit Mode". */6233 return iemRaiseGeneralProtectionFault0(pVCpu);6234 }6235 6236 default:6237 AssertFailedReturn(VERR_IEM_IPE_7);6238 }6239 }6240 6241 6242 /**6243 * Translates a virtual address to a physical physical address and checks if we6244 * can access the page as specified.6245 *6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.6247 * @param GCPtrMem The virtual address.6248 * @param cbAccess The access size, for raising \#PF correctly for6249 * FXSAVE and such.6250 * @param fAccess The intended access.6251 * @param pGCPhysMem Where to return the physical address.6252 */6253 VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,6254 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT6255 {6256 /** @todo Need a different PGM interface here. We're currently using6257 * generic / REM interfaces. this won't cut it for R0. */6258 /** @todo If/when PGM handles paged real-mode, we can remove the hack in6259 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault6260 * here. */6261 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));6262 PGMPTWALKFAST WalkFast;6263 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);6264 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);6265 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);6266 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);6267 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))6268 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);6269 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))6270 fQPage |= PGMQPAGE_F_USER_MODE;6271 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);6272 if (RT_SUCCESS(rc))6273 {6274 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);6275 6276 /* If the page is writable and does not have the no-exec bit set, all6277 access is allowed. Otherwise we'll have to check more carefully... */6278 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)6279 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)6280 || (WalkFast.fEffective & X86_PTE_RW)6281 || ( ( IEM_GET_CPL(pVCpu) != 36282 || (fAccess & IEM_ACCESS_WHAT_SYS))6283 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )6284 && ( (WalkFast.fEffective & X86_PTE_US)6285 || IEM_GET_CPL(pVCpu) != 36286 || (fAccess & IEM_ACCESS_WHAT_SYS) )6287 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)6288 || !(WalkFast.fEffective & X86_PTE_PAE_NX)6289 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )6290 )6291 );6292 6293 /* PGMGstQueryPageFast sets the A & D bits. */6294 /** @todo testcase: check when A and D bits are actually set by the CPU. */6295 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? 
X86_PTE_D | X86_PTE_A : X86_PTE_A)));6296 6297 *pGCPhysMem = WalkFast.GCPhys;6298 return VINF_SUCCESS;6299 }6300 6301 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));6302 /** @todo Check unassigned memory in unpaged mode. */6303 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT6304 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)6305 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);6306 #endif6307 *pGCPhysMem = NIL_RTGCPHYS;6308 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);6309 }6310 6311 #if 0 /*unused*/6312 /**6313 * Looks up a memory mapping entry.6314 *6315 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).6316 * @param pVCpu The cross context virtual CPU structure of the calling thread.6317 * @param pvMem The memory address.6318 * @param fAccess The access to.6319 */6320 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)6321 {6322 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6323 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;6324 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem6325 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6326 return 0;6327 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem6328 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6329 return 1;6330 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem6331 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6332 return 2;6333 return VERR_NOT_FOUND;6334 }6335 #endif6336 6337 /**6338 * Finds a free memmap entry when using iNextMapping doesn't work.6339 *6340 * @returns Memory mapping index, 1024 on failure.6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.6342 */6343 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)6344 {6345 /*6346 * The easy case.6347 */6348 if (pVCpu->iem.s.cActiveMappings == 0)6349 {6350 pVCpu->iem.s.iNextMapping = 1;6351 return 0;6352 }6353 6354 /* There should be enough mappings for all instructions. 
*/6355 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);6356 6357 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)6358 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)6359 return i;6360 6361 AssertFailedReturn(1024);6362 }6363 6364 6365 /**6366 * Commits a bounce buffer that needs writing back and unmaps it.6367 *6368 * @returns Strict VBox status code.6369 * @param pVCpu The cross context virtual CPU structure of the calling thread.6370 * @param iMemMap The index of the buffer to commit.6371 * @param fPostponeFail Whether we can postpone writer failures to ring-3.6372 * Always false in ring-3, obviously.6373 */6374 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)6375 {6376 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);6377 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);6378 #ifdef IN_RING36379 Assert(!fPostponeFail);6380 RT_NOREF_PV(fPostponeFail);6381 #endif6382 6383 /*6384 * Do the writing.6385 */6386 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6387 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)6388 {6389 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;6390 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6391 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6392 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6393 {6394 /*6395 * Carefully and efficiently dealing with access handler return6396 * codes make this a little bloated.6397 */6398 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,6399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6400 pbBuf,6401 cbFirst,6402 PGMACCESSORIGIN_IEM);6403 if (rcStrict == VINF_SUCCESS)6404 {6405 if (cbSecond)6406 {6407 rcStrict = PGMPhysWrite(pVM,6408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6409 pbBuf + cbFirst,6410 cbSecond,6411 PGMACCESSORIGIN_IEM);6412 if (rcStrict == VINF_SUCCESS)6413 { /* nothing */ }6414 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6415 {6416 LogEx(LOG_GROUP_IEM,6417 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",6418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6420 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6421 }6422 #ifndef IN_RING36423 else if (fPostponeFail)6424 {6425 LogEx(LOG_GROUP_IEM,6426 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6429 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6430 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6431 return iemSetPassUpStatus(pVCpu, rcStrict);6432 }6433 #endif6434 else6435 {6436 LogEx(LOG_GROUP_IEM,6437 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6440 return rcStrict;6441 }6442 }6443 }6444 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6445 {6446 if (!cbSecond)6447 {6448 LogEx(LOG_GROUP_IEM,6449 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",6450 
pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));6451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6452 }6453 else6454 {6455 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,6456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6457 pbBuf + cbFirst,6458 cbSecond,6459 PGMACCESSORIGIN_IEM);6460 if (rcStrict2 == VINF_SUCCESS)6461 {6462 LogEx(LOG_GROUP_IEM,6463 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",6464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6466 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6467 }6468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6469 {6470 LogEx(LOG_GROUP_IEM,6471 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",6472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6474 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6476 }6477 #ifndef IN_RING36478 else if (fPostponeFail)6479 {6480 LogEx(LOG_GROUP_IEM,6481 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6485 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6486 return iemSetPassUpStatus(pVCpu, rcStrict);6487 }6488 #endif6489 else6490 {6491 LogEx(LOG_GROUP_IEM,6492 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6495 return rcStrict2;6496 }6497 }6498 }6499 #ifndef IN_RING36500 else if (fPostponeFail)6501 {6502 LogEx(LOG_GROUP_IEM,6503 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6506 if (!cbSecond)6507 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;6508 else6509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;6510 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6511 return iemSetPassUpStatus(pVCpu, rcStrict);6512 }6513 #endif6514 else6515 {6516 LogEx(LOG_GROUP_IEM,6517 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6520 return rcStrict;6521 }6522 }6523 else6524 {6525 /*6526 * No access handlers, much simpler.6527 */6528 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);6529 if (RT_SUCCESS(rc))6530 {6531 if (cbSecond)6532 {6533 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);6534 if (RT_SUCCESS(rc))6535 { /* likely 
*/ }6536 else6537 {6538 LogEx(LOG_GROUP_IEM,6539 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6540 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));6542 return rc;6543 }6544 }6545 }6546 else6547 {6548 LogEx(LOG_GROUP_IEM,6549 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,6551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6552 return rc;6553 }6554 }6555 }6556 6557 #if defined(IEM_LOG_MEMORY_WRITES)6558 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6559 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));6560 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)6561 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6562 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),6563 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));6564 6565 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6566 g_cbIemWrote = cbWrote;6567 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));6568 #endif6569 6570 /*6571 * Free the mapping entry.6572 */6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;6574 Assert(pVCpu->iem.s.cActiveMappings != 0);6575 pVCpu->iem.s.cActiveMappings--;6576 return VINF_SUCCESS;6577 }6578 6579 6580 /**6581 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.6582 */6583 DECL_FORCE_INLINE(uint32_t)6584 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)6585 {6586 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;6587 if (fAccess & IEM_ACCESS_TYPE_WRITE)6588 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6589 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6590 }6591 6592 6593 /**6594 * iemMemMap worker that deals with a request crossing pages.6595 */6596 static VBOXSTRICTRC6597 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,6598 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)6599 {6600 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);6601 Assert(cbMem <= GUEST_PAGE_SIZE);6602 6603 /*6604 * Do the address translations.6605 */6606 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);6607 RTGCPHYS GCPhysFirst;6608 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);6609 if (rcStrict != VINF_SUCCESS)6610 return rcStrict;6611 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));6612 6613 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;6614 RTGCPHYS GCPhysSecond;6615 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6616 cbSecondPage, fAccess, &GCPhysSecond);6617 if (rcStrict != VINF_SUCCESS)6618 return rcStrict;6619 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);6620 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? 
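The address arithmetic at the start of iemMemBounceBufferMapCrossPage simply splits the access at the 4 KiB boundary it crosses. A minimal sketch, assuming the access really does cross a page as the caller asserts; e.g. an 8-byte access at offset 0xffe splits into 2 + 6 bytes.

#include <cstdint>
#include <cstddef>

struct IemSketchPageSplit { uint32_t cbFirst; uint32_t cbSecond; };

static IemSketchPageSplit iemSketchSplitAtPage(uint64_t GCPtr, size_t cbMem)
{
    uint32_t const cbFirst = UINT32_C(0x1000) - (uint32_t)(GCPtr & UINT32_C(0xfff));
    return IemSketchPageSplit{ cbFirst, (uint32_t)cbMem - cbFirst };
}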
*/6621 6622 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6623 6624 /*6625 * Check for data breakpoints.6626 */6627 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))6628 { /* likely */ }6629 else6630 {6631 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);6632 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6633 cbSecondPage, fAccess);6634 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);6635 if (fDataBps > 1)6636 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",6637 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));6638 }6639 6640 /*6641 * Read in the current memory content if it's a read, execute or partial6642 * write access.6643 */6644 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6645 6646 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6647 {6648 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6649 {6650 /*6651 * Must carefully deal with access handler status codes here,6652 * makes the code a bit bloated.6653 */6654 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);6655 if (rcStrict == VINF_SUCCESS)6656 {6657 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6658 if (rcStrict == VINF_SUCCESS)6659 { /*likely */ }6660 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6661 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6662 else6663 {6664 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",6665 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));6666 return rcStrict;6667 }6668 }6669 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6670 {6671 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6672 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6673 {6674 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6675 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6676 }6677 else6678 {6679 LogEx(LOG_GROUP_IEM,6680 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",6681 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));6682 return rcStrict2;6683 }6684 }6685 else6686 {6687 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6688 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6689 return rcStrict;6690 }6691 }6692 else6693 {6694 /*6695 * No informational status codes here, much more straight forward.6696 */6697 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);6698 if (RT_SUCCESS(rc))6699 {6700 Assert(rc == VINF_SUCCESS);6701 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);6702 if (RT_SUCCESS(rc))6703 Assert(rc == VINF_SUCCESS);6704 else6705 {6706 LogEx(LOG_GROUP_IEM,6707 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));6708 return rc;6709 }6710 }6711 else6712 {6713 LogEx(LOG_GROUP_IEM,6714 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));6715 return rc;6716 }6717 }6718 }6719 #ifdef VBOX_STRICT6720 else6721 memset(pbBuf, 0xcc, cbMem);6722 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6723 memset(pbBuf + 
cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6724 #endif6725 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);6726 6727 /*6728 * Commit the bounce buffer entry.6729 */6730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;6732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;6733 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;6734 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;6735 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6736 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6737 pVCpu->iem.s.iNextMapping = iMemMap + 1;6738 pVCpu->iem.s.cActiveMappings++;6739 6740 *ppvMem = pbBuf;6741 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6742 return VINF_SUCCESS;6743 }6744 6745 6746 /**6747 * iemMemMap woker that deals with iemMemPageMap failures.6748 */6749 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,6750 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)6751 {6752 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);6753 6754 /*6755 * Filter out conditions we can handle and the ones which shouldn't happen.6756 */6757 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE6758 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL6759 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)6760 {6761 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);6762 return rcMap;6763 }6764 pVCpu->iem.s.cPotentialExits++;6765 6766 /*6767 * Read in the current memory content if it's a read, execute or partial6768 * write access.6769 */6770 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6771 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6772 {6773 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)6774 memset(pbBuf, 0xff, cbMem);6775 else6776 {6777 int rc;6778 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6779 {6780 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);6781 if (rcStrict == VINF_SUCCESS)6782 { /* nothing */ }6783 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6784 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6785 else6786 {6787 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6788 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6789 return rcStrict;6790 }6791 }6792 else6793 {6794 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);6795 if (RT_SUCCESS(rc))6796 { /* likely */ }6797 else6798 {6799 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6800 GCPhysFirst, rc));6801 return rc;6802 }6803 }6804 }6805 }6806 #ifdef VBOX_STRICT6807 else6808 memset(pbBuf, 0xcc, cbMem);6809 #endif6810 #ifdef VBOX_STRICT6811 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6812 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6813 #endif6814 6815 /*6816 * Commit the bounce buffer entry.6817 */6818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;6820 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;6821 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;6822 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;6823 
pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6824 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6825 pVCpu->iem.s.iNextMapping = iMemMap + 1;6826 pVCpu->iem.s.cActiveMappings++;6827 6828 *ppvMem = pbBuf;6829 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6830 return VINF_SUCCESS;6831 }6832 6833 6834 6835 /**6836 * Maps the specified guest memory for the given kind of access.6837 *6838 * This may be using bounce buffering of the memory if it's crossing a page6839 * boundary or if there is an access handler installed for any of it. Because6840 * of lock prefix guarantees, we're in for some extra clutter when this6841 * happens.6842 *6843 * This may raise a \#GP, \#SS, \#PF or \#AC.6844 *6845 * @returns VBox strict status code.6846 *6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.6848 * @param ppvMem Where to return the pointer to the mapped memory.6849 * @param pbUnmapInfo Where to return unmap info to be passed to6850 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when6851 * done.6852 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,6853 * 8, 12, 16, 32 or 512. When used by string operations6854 * it can be up to a page.6855 * @param iSegReg The index of the segment register to use for this6856 * access. The base and limits are checked. Use UINT8_MAX6857 * to indicate that no segmentation is required (for IDT,6858 * GDT and LDT accesses).6859 * @param GCPtrMem The address of the guest memory.6860 * @param fAccess How the memory is being accessed. The6861 * IEM_ACCESS_TYPE_XXX part is used to figure out how to6862 * map the memory, while the IEM_ACCESS_WHAT_XXX part is6863 * used when raising exceptions. The IEM_ACCESS_ATOMIC and6864 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be6865 * set.6866 * @param uAlignCtl Alignment control:6867 * - Bits 15:0 is the alignment mask.6868 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,6869 * IEM_MEMMAP_F_ALIGN_SSE, and6870 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.6871 * Pass zero to skip alignment.6872 */6873 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,6874 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT6875 {6876 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);6877 6878 /*6879 * Check the input and figure out which mapping entry to use.6880 */6881 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));6882 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 946883 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );6884 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));6885 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6886 6887 unsigned iMemMap = pVCpu->iem.s.iNextMapping;6888 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)6889 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)6890 {6891 iMemMap = iemMemMapFindFree(pVCpu);6892 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),6893 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,6894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,6895 pVCpu->iem.s.aMemMappings[2].fAccess),6896 VERR_IEM_IPE_9);6897 }6898 6899 /*6900 * Map the memory, checking that we can actually access it. 
If something6901 * slightly complicated happens, fall back on bounce buffering.6902 */6903 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);6904 if (rcStrict == VINF_SUCCESS)6905 { /* likely */ }6906 else6907 return rcStrict;6908 6909 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */6910 { /* likely */ }6911 else6912 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);6913 6914 /*6915 * Alignment check.6916 */6917 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )6918 { /* likelyish */ }6919 else6920 {6921 /* Misaligned access. */6922 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)6923 {6924 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)6925 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)6926 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )6927 {6928 AssertCompile(X86_CR0_AM == X86_EFL_AC);6929 6930 if (!iemMemAreAlignmentChecksEnabled(pVCpu))6931 { /* likely */ }6932 else6933 return iemRaiseAlignmentCheckException(pVCpu);6934 }6935 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)6936 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */6937 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU6938 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as6939 * that's what FXSAVE does on a 10980xe. */6940 && iemMemAreAlignmentChecksEnabled(pVCpu))6941 return iemRaiseAlignmentCheckException(pVCpu);6942 else6943 return iemRaiseGeneralProtectionFault0(pVCpu);6944 }6945 6946 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)6947 /* If the access is atomic there are host platform alignmnet restrictions6948 we need to conform with. */6949 if ( !(fAccess & IEM_ACCESS_ATOMIC)6950 # if defined(RT_ARCH_AMD64)6951 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */6952 # elif defined(RT_ARCH_ARM64)6953 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */6954 # else6955 # error port me6956 # endif6957 )6958 { /* okay */ }6959 else6960 {6961 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));6962 pVCpu->iem.s.cMisalignedAtomics += 1;6963 return VINF_EM_EMULATE_SPLIT_LOCK;6964 }6965 #endif6966 }6967 6968 #ifdef IEM_WITH_DATA_TLB6969 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));6970 6971 /*6972 * Get the TLB entry for this page and check PT flags.6973 *6974 * We reload the TLB entry if we need to set the dirty bit (accessed6975 * should in theory always be set).6976 */6977 uint8_t *pbMem = NULL;6978 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);6979 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);6980 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);6981 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)6982 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )6983 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)6984 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )6985 {6986 # ifdef IEM_WITH_TLB_STATISTICS6987 pVCpu->iem.s.DataTlb.cTlbCoreHits++;6988 # endif6989 6990 /* If the page is either supervisor only or non-writable, we need to do6991 more careful access checks. 
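The misaligned-atomic fallback in iemMemMap amounts to a cache-line-crossing test (64-byte lines on the x86 host path, 16-byte blocks for ARM64 LSE2). An illustrative form of the x86 variant; a 4-byte atomic at offset 62 within a line fails the check and would take the split-lock emulation path.

#include <cstdint>
#include <cstddef>

/* True if an atomic access of cbMem bytes at GCPtr stays within one 64-byte cache line. */
static bool iemSketchAtomicWithinCacheLine(uint64_t GCPtr, size_t cbMem)
{
    return 64u - (uint32_t)(GCPtr & 63u) >= cbMem;
}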
*/6992 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))6993 {6994 /* Write to read only memory? */6995 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)6996 && (fAccess & IEM_ACCESS_TYPE_WRITE)6997 && ( ( IEM_GET_CPL(pVCpu) == 36998 && !(fAccess & IEM_ACCESS_WHAT_SYS))6999 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))7000 {7001 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7002 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7003 }7004 7005 /* Kernel memory accessed by userland? */7006 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7007 && IEM_GET_CPL(pVCpu) == 37008 && !(fAccess & IEM_ACCESS_WHAT_SYS))7009 {7010 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7011 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7012 }7013 }7014 7015 /* Look up the physical page info if necessary. */7016 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7017 # ifdef IN_RING37018 pbMem = pTlbe->pbMappingR3;7019 # else7020 pbMem = NULL;7021 # endif7022 else7023 {7024 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))7025 { /* likely */ }7026 else7027 IEMTlbInvalidateAllPhysicalSlow(pVCpu);7028 pTlbe->pbMappingR3 = NULL;7029 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7030 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7031 &pbMem, &pTlbe->fFlagsAndPhysRev);7032 AssertRCReturn(rc, rc);7033 # ifdef IN_RING37034 pTlbe->pbMappingR3 = pbMem;7035 # endif7036 }7037 }7038 else7039 {7040 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7041 7042 /* This page table walking will set A bits as required by the access while performing the walk.7043 ASSUMES these are set when the address is translated rather than on commit... */7044 /** @todo testcase: check when A bits are actually set by the CPU for code. */7045 PGMPTWALKFAST WalkFast;7046 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7047 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7048 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7049 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7050 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7051 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7052 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7053 fQPage |= PGMQPAGE_F_USER_MODE;7054 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7055 if (RT_SUCCESS(rc))7056 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7057 else7058 {7059 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7060 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7061 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7062 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7063 # endif7064 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7065 }7066 7067 uint32_t fDataBps;7068 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7069 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7070 {7071 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7072 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. 
*/7073 {7074 pTlbe--;7075 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7076 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7077 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7078 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7079 else7080 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7081 # endif7082 }7083 else7084 {7085 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7086 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7087 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7088 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7089 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7090 else7091 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7092 # endif7093 }7094 }7095 else7096 {7097 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7098 to the page with the data access breakpoint armed on it to pass thru here. */7099 if (fDataBps > 1)7100 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7101 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7102 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7103 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7104 pTlbe->uTag = uTagNoRev;7105 }7106 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7107 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7108 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7109 pTlbe->GCPhys = GCPhysPg;7110 pTlbe->pbMappingR3 = NULL;7111 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));7112 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));7113 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)7114 || !(fAccess & IEM_ACCESS_TYPE_WRITE)7115 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7116 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7117 || IEM_GET_CPL(pVCpu) != 37118 || (fAccess & IEM_ACCESS_WHAT_SYS));7119 7120 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7121 {7122 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7123 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7124 else7125 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7126 }7127 7128 /* Resolve the physical address. */7129 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7130 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7131 &pbMem, &pTlbe->fFlagsAndPhysRev);7132 AssertRCReturn(rc, rc);7133 # ifdef IN_RING37134 pTlbe->pbMappingR3 = pbMem;7135 # endif7136 }7137 7138 /*7139 * Check the physical page level access and mapping.7140 */7141 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))7142 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)7143 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )7144 { /* probably likely */ }7145 else7146 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,7147 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7148 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? 
VERR_PGM_PHYS_TLB_UNASSIGNED7149 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7150 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7151 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */7152 7153 if (pbMem)7154 {7155 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7156 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7157 fAccess |= IEM_ACCESS_NOT_LOCKED;7158 }7159 else7160 {7161 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7162 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7163 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7164 if (rcStrict != VINF_SUCCESS)7165 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7166 }7167 7168 void * const pvMem = pbMem;7169 7170 if (fAccess & IEM_ACCESS_TYPE_WRITE)7171 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7172 if (fAccess & IEM_ACCESS_TYPE_READ)7173 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7174 7175 #else /* !IEM_WITH_DATA_TLB */7176 7177 RTGCPHYS GCPhysFirst;7178 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7179 if (rcStrict != VINF_SUCCESS)7180 return rcStrict;7181 7182 if (fAccess & IEM_ACCESS_TYPE_WRITE)7183 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7184 if (fAccess & IEM_ACCESS_TYPE_READ)7185 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7186 7187 void *pvMem;7188 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7189 if (rcStrict != VINF_SUCCESS)7190 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7191 7192 #endif /* !IEM_WITH_DATA_TLB */7193 7194 /*7195 * Fill in the mapping table entry.7196 */7197 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7198 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7199 pVCpu->iem.s.iNextMapping = iMemMap + 1;7200 pVCpu->iem.s.cActiveMappings += 1;7201 7202 *ppvMem = pvMem;7203 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7204 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);7205 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);7206 7207 return VINF_SUCCESS;7208 }7209 7210 7211 /**7212 * Commits the guest memory if bounce buffered and unmaps it.7213 *7214 * @returns Strict VBox status code.7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.7216 * @param bUnmapInfo Unmap info set by iemMemMap.7217 */7218 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7219 {7220 uintptr_t const iMemMap = bUnmapInfo & 0x7;7221 AssertMsgReturn( (bUnmapInfo & 0x08)7222 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7223 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),7224 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7225 VERR_NOT_FOUND);7226 7227 /* If it's bounce buffered, we may need to write back the buffer. 
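An illustrative caller sketch (hypothetical locals GCPtrEff and u32Value), showing the map/commit pairing
this function completes; it mirrors what the store helpers further down, e.g. iemMemStoreDataU128AlignedSse,
do:

    uint8_t      bUnmapInfo;
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
                                      X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;          // direct mapping or bounce buffer, the caller cannot tell
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }

Only the bounce buffered case needs an explicit write back below; a direct mapping was already modified
in place and merely has to be unlocked and its slot freed.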
*/7228 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7229 {7230 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7231 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7232 }7233 /* Otherwise unlock it. */7234 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7235 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7236 7237 /* Free the entry. */7238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7239 Assert(pVCpu->iem.s.cActiveMappings != 0);7240 pVCpu->iem.s.cActiveMappings--;7241 return VINF_SUCCESS;7242 }7243 7244 7245 /**7246 * Rolls back the guest memory (conceptually only) and unmaps it.7247 *7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.7249 * @param bUnmapInfo Unmap info set by iemMemMap.7250 */7251 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7252 {7253 uintptr_t const iMemMap = bUnmapInfo & 0x7;7254 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7255 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7256 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7257 == ((unsigned)bUnmapInfo >> 4),7258 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7259 7260 /* Unlock it if necessary. */7261 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7262 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7263 7264 /* Free the entry. */7265 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7266 Assert(pVCpu->iem.s.cActiveMappings != 0);7267 pVCpu->iem.s.cActiveMappings--;7268 }7269 7270 #ifdef IEM_WITH_SETJMP7271 7272 /**7273 * Maps the specified guest memory for the given kind of access, longjmp on7274 * error.7275 *7276 * This may be using bounce buffering of the memory if it's crossing a page7277 * boundary or if there is an access handler installed for any of it. Because7278 * of lock prefix guarantees, we're in for some extra clutter when this7279 * happens.7280 *7281 * This may raise a \#GP, \#SS, \#PF or \#AC.7282 *7283 * @returns Pointer to the mapped memory.7284 *7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.7286 * @param bUnmapInfo Where to return unmap info to be passed to7287 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,7288 * iemMemCommitAndUnmapWoSafeJmp,7289 * iemMemCommitAndUnmapRoSafeJmp,7290 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap7291 * when done.7292 * @param cbMem The number of bytes to map. This is usually 1,7293 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by7294 * string operations it can be up to a page.7295 * @param iSegReg The index of the segment register to use for7296 * this access. The base and limits are checked.7297 * Use UINT8_MAX to indicate that no segmentation7298 * is required (for IDT, GDT and LDT accesses).7299 * @param GCPtrMem The address of the guest memory.7300 * @param fAccess How the memory is being accessed. The7301 * IEM_ACCESS_TYPE_XXX part is used to figure out how to7302 * map the memory, while the IEM_ACCESS_WHAT_XXX part is7303 * used when raising exceptions. 
The IEM_ACCESS_ATOMIC and7304 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be7305 * set.7306 * @param uAlignCtl Alignment control:7307 * - Bits 15:0 is the alignment mask.7308 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,7309 * IEM_MEMMAP_F_ALIGN_SSE, and7310 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.7311 * Pass zero to skip alignment.7312 * @tparam a_fSafe Whether this is a call from "safe" fallback function in7313 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that7314 * needs counting as such in the statistics.7315 */7316 template<bool a_fSafeCall = false>7317 static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7318 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7319 {7320 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);7321 7322 /*7323 * Check the input, check segment access and adjust address7324 * with segment base.7325 */7326 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */7327 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));7328 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));7329 7330 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);7331 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7332 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7333 7334 /*7335 * Alignment check.7336 */7337 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )7338 { /* likelyish */ }7339 else7340 {7341 /* Misaligned access. */7342 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7343 {7344 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)7345 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)7346 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )7347 {7348 AssertCompile(X86_CR0_AM == X86_EFL_AC);7349 7350 if (iemMemAreAlignmentChecksEnabled(pVCpu))7351 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7352 }7353 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)7354 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */7355 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU7356 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as7357 * that's what FXSAVE does on a 10980xe. */7358 && iemMemAreAlignmentChecksEnabled(pVCpu))7359 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7360 else7361 iemRaiseGeneralProtectionFault0Jmp(pVCpu);7362 }7363 7364 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)7365 /* If the access is atomic there are host platform alignmnet restrictions7366 we need to conform with. */7367 if ( !(fAccess & IEM_ACCESS_ATOMIC)7368 # if defined(RT_ARCH_AMD64)7369 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */7370 # elif defined(RT_ARCH_ARM64)7371 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. 
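Worked example (illustrative): an 8 byte atomic access whose offset within the cache line is 60 gives
64 - 60 = 4, which is less than 8, so on AMD64 it falls into the split-lock branch below and longjmps
with VINF_EM_EMULATE_SPLIT_LOCK; at offset 56 it fits exactly (64 - 56 = 8 >= 8) and stays on the fast
path. The ARM64 test is identical with a 16 byte LSE2 granule.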
*/7372 # else7373 # error port me7374 # endif7375 )7376 { /* okay */ }7377 else7378 {7379 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));7380 pVCpu->iem.s.cMisalignedAtomics += 1;7381 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);7382 }7383 #endif7384 }7385 7386 /*7387 * Figure out which mapping entry to use.7388 */7389 unsigned iMemMap = pVCpu->iem.s.iNextMapping;7390 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7391 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)7392 {7393 iMemMap = iemMemMapFindFree(pVCpu);7394 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),7395 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,7396 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,7397 pVCpu->iem.s.aMemMappings[2].fAccess),7398 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));7399 }7400 7401 /*7402 * Crossing a page boundary?7403 */7404 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)7405 { /* No (likely). */ }7406 else7407 {7408 void *pvMem;7409 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);7410 if (rcStrict == VINF_SUCCESS)7411 return pvMem;7412 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7413 }7414 7415 #ifdef IEM_WITH_DATA_TLB7416 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));7417 7418 /*7419 * Get the TLB entry for this page checking that it has the A & D bits7420 * set as per fAccess flags.7421 */7422 /** @todo make the caller pass these in with fAccess. */7423 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 37424 ? IEMTLBE_F_PT_NO_USER : 0;7425 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE7426 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY7427 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)7428 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7429 ? IEMTLBE_F_PT_NO_WRITE : 0)7430 : 0;7431 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;7432 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);7433 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);7434 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);7435 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)7436 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )7437 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)7438 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )7439 {7440 # ifdef IEM_WITH_TLB_STATISTICS7441 if (a_fSafeCall)7442 pVCpu->iem.s.DataTlb.cTlbSafeHits++;7443 else7444 pVCpu->iem.s.DataTlb.cTlbCoreHits++;7445 # endif7446 }7447 else7448 {7449 if (a_fSafeCall)7450 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;7451 else7452 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7453 7454 /* This page table walking will set A and D bits as required by the7455 access while performing the walk.7456 ASSUMES these are set when the address is translated rather than on commit... */7457 /** @todo testcase: check when A and D bits are actually set by the CPU. 
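For example (illustrative): a ring-3, non-SYS write lands here with fNoUser = IEMTLBE_F_PT_NO_USER and
fNoWriteNoDirty = IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE, so the entry
loaded below must have all of those bits clear, i.e. the page must be user accessible, writable, and
already marked accessed and dirty, before the revision check further down will hand out the direct mapping.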
*/7458 PGMPTWALKFAST WalkFast;7459 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7460 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7461 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7462 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7463 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7464 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7465 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7466 fQPage |= PGMQPAGE_F_USER_MODE;7467 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7468 if (RT_SUCCESS(rc))7469 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7470 else7471 {7472 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7473 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7474 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7475 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7476 # endif7477 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7478 }7479 7480 uint32_t fDataBps;7481 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7482 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7483 {7484 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7485 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */7486 {7487 pTlbe--;7488 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7489 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7490 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7491 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7492 else7493 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7494 # endif7495 }7496 else7497 {7498 if (a_fSafeCall)7499 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;7500 else7501 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7502 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7503 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7504 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7505 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7506 else7507 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7508 # endif7509 }7510 }7511 else7512 {7513 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7514 to the page with the data access breakpoint armed on it to pass thru here. 
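To illustrate the effect: DataBreakpointTlbe sits outside the normal even/odd TLB entry pairs, so it can
never satisfy the tag lookup at the top of this function; every access to the armed page therefore misses
again and re-runs this check, and the CPUMCTX_DBG_HIT_DRX / CPUMCTX_DBG_DBGF bits merged into eflags.uBoth
below record the hit so it can be acted on later.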
*/7515 if (fDataBps > 1)7516 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7517 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7518 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7519 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7520 pTlbe->uTag = uTagNoRev;7521 }7522 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7523 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7524 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7525 pTlbe->GCPhys = GCPhysPg;7526 pTlbe->pbMappingR3 = NULL;7527 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7528 Assert( !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7529 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7530 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));7531 7532 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7533 {7534 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7535 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7536 else7537 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7538 }7539 7540 /* Resolve the physical address. */7541 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7542 uint8_t *pbMemFullLoad = NULL;7543 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7544 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);7545 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7546 # ifdef IN_RING37547 pTlbe->pbMappingR3 = pbMemFullLoad;7548 # endif7549 }7550 7551 /*7552 * Check the flags and physical revision.7553 * Note! This will revalidate the uTlbPhysRev after a full load. This is7554 * just to keep the code structure simple (i.e. avoid gotos or similar).7555 */7556 uint8_t *pbMem;7557 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))7558 == pVCpu->iem.s.DataTlb.uTlbPhysRev)7559 # ifdef IN_RING37560 pbMem = pTlbe->pbMappingR3;7561 # else7562 pbMem = NULL;7563 # endif7564 else7565 {7566 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7567 7568 /*7569 * Okay, something isn't quite right or needs refreshing.7570 */7571 /* Write to read only memory? */7572 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7573 {7574 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7575 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7576 /** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether7577 * to trigger an \#PG or a VM nested paging exit here yet! */7578 if (Walk.fFailed & PGM_WALKFAIL_EPT)7579 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7580 # endif7581 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7582 }7583 7584 /* Kernel memory accessed by userland? 
*/7585 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)7586 {7587 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7588 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7589 /** @todo TLB: See above. */7590 if (Walk.fFailed & PGM_WALKFAIL_EPT)7591 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7592 # endif7593 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7594 }7595 7596 /*7597 * Check if the physical page info needs updating.7598 */7599 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7600 # ifdef IN_RING37601 pbMem = pTlbe->pbMappingR3;7602 # else7603 pbMem = NULL;7604 # endif7605 else7606 {7607 pTlbe->pbMappingR3 = NULL;7608 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7609 pbMem = NULL;7610 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7611 &pbMem, &pTlbe->fFlagsAndPhysRev);7612 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7613 # ifdef IN_RING37614 pTlbe->pbMappingR3 = pbMem;7615 # endif7616 }7617 7618 /*7619 * Check the physical page level access and mapping.7620 */7621 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))7622 { /* probably likely */ }7623 else7624 {7625 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,7626 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7627 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED7628 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7629 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7630 if (rcStrict == VINF_SUCCESS)7631 return pbMem;7632 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7633 }7634 }7635 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. 
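In practice (clarifying note): when PGMPhysIemGCPhys2PtrNoLock was able to hand out a ring-3 mapping,
pbMem is non-NULL and the access is tagged IEM_ACCESS_NOT_LOCKED below; otherwise iemMemPageMap takes a
real page mapping lock, which the commit and rollback paths release again when that flag is absent.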
*/7636 7637 if (pbMem)7638 {7639 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7640 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7641 fAccess |= IEM_ACCESS_NOT_LOCKED;7642 }7643 else7644 {7645 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7646 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7647 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7648 if (rcStrict == VINF_SUCCESS)7649 {7650 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7651 return pbMem;7652 }7653 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7654 }7655 7656 void * const pvMem = pbMem;7657 7658 if (fAccess & IEM_ACCESS_TYPE_WRITE)7659 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7660 if (fAccess & IEM_ACCESS_TYPE_READ)7661 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7662 7663 #else /* !IEM_WITH_DATA_TLB */7664 7665 7666 RTGCPHYS GCPhysFirst;7667 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7668 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7669 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7670 7671 if (fAccess & IEM_ACCESS_TYPE_WRITE)7672 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7673 if (fAccess & IEM_ACCESS_TYPE_READ)7674 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7675 7676 void *pvMem;7677 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7678 if (rcStrict == VINF_SUCCESS)7679 { /* likely */ }7680 else7681 {7682 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7683 if (rcStrict == VINF_SUCCESS)7684 return pvMem;7685 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7686 }7687 7688 #endif /* !IEM_WITH_DATA_TLB */7689 7690 /*7691 * Fill in the mapping table entry.7692 */7693 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7694 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7695 pVCpu->iem.s.iNextMapping = iMemMap + 1;7696 pVCpu->iem.s.cActiveMappings++;7697 7698 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7699 return pvMem;7700 }7701 7702 7703 /** @see iemMemMapJmp */7704 static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7705 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7706 {7707 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);7708 }7709 7710 7711 /**7712 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.7713 *7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.7715 * @param pvMem The mapping.7716 * @param fAccess The kind of access.7717 */7718 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7719 {7720 uintptr_t const iMemMap = bUnmapInfo & 0x7;7721 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7722 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7723 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7724 == ((unsigned)bUnmapInfo >> 4),7725 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7726 7727 /* If it's bounce buffered, we may need to write back the buffer. 
*/7728 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7729 {7730 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7731 {7732 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7733 if (rcStrict == VINF_SUCCESS)7734 return;7735 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7736 }7737 }7738 /* Otherwise unlock it. */7739 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7740 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7741 7742 /* Free the entry. */7743 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7744 Assert(pVCpu->iem.s.cActiveMappings != 0);7745 pVCpu->iem.s.cActiveMappings--;7746 }7747 7748 7749 /** Fallback for iemMemCommitAndUnmapRwJmp. */7750 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7751 {7752 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7753 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7754 }7755 7756 7757 /** Fallback for iemMemCommitAndUnmapAtJmp. */7758 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7759 {7760 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7761 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7762 }7763 7764 7765 /** Fallback for iemMemCommitAndUnmapWoJmp. */7766 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7767 {7768 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7769 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7770 }7771 7772 7773 /** Fallback for iemMemCommitAndUnmapRoJmp. */7774 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7775 {7776 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);7777 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7778 }7779 7780 7781 /** Fallback for iemMemRollbackAndUnmapWo. */7782 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7783 {7784 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7785 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);7786 }7787 7788 #endif /* IEM_WITH_SETJMP */7789 7790 #ifndef IN_RING37791 /**7792 * Commits the guest memory if bounce buffered and unmaps it, if any bounce7793 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).7794 *7795 * Allows the instruction to be completed and retired, while the IEM user will7796 * return to ring-3 immediately afterwards and do the postponed writes there.7797 *7798 * @returns VBox status code (no strict statuses). 
Caller must check7799 * VMCPU_FF_IEM before repeating string instructions and similar stuff.7800 * @param pVCpu The cross context virtual CPU structure of the calling thread.7801 * @param pvMem The mapping.7802 * @param fAccess The kind of access.7803 */7804 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7805 {7806 uintptr_t const iMemMap = bUnmapInfo & 0x7;7807 AssertMsgReturn( (bUnmapInfo & 0x08)7808 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7809 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7810 == ((unsigned)bUnmapInfo >> 4),7811 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7812 VERR_NOT_FOUND);7813 7814 /* If it's bounce buffered, we may need to write back the buffer. */7815 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7816 {7817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7818 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);7819 }7820 /* Otherwise unlock it. */7821 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7822 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7823 7824 /* Free the entry. */7825 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7826 Assert(pVCpu->iem.s.cActiveMappings != 0);7827 pVCpu->iem.s.cActiveMappings--;7828 return VINF_SUCCESS;7829 }7830 #endif7831 7832 7833 /**7834 * Rollbacks mappings, releasing page locks and such.7835 *7836 * The caller shall only call this after checking cActiveMappings.7837 *7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.7839 */7840 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT7841 {7842 Assert(pVCpu->iem.s.cActiveMappings > 0);7843 7844 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);7845 while (iMemMap-- > 0)7846 {7847 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;7848 if (fAccess != IEM_ACCESS_INVALID)7849 {7850 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));7851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7852 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))7853 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7854 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,7855 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",7856 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,7857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));7858 pVCpu->iem.s.cActiveMappings--;7859 }7860 }7861 }7862 7863 7864 /*7865 * Instantiate R/W templates.7866 */7867 #define TMPL_MEM_WITH_STACK7868 7869 #define TMPL_MEM_TYPE uint8_t7870 #define TMPL_MEM_FN_SUFF U87871 #define TMPL_MEM_FMT_TYPE "%#04x"7872 #define TMPL_MEM_FMT_DESC "byte"7873 #include "IEMAllMemRWTmpl.cpp.h"7874 7875 #define TMPL_MEM_TYPE uint16_t7876 #define TMPL_MEM_FN_SUFF U167877 #define TMPL_MEM_FMT_TYPE "%#06x"7878 #define TMPL_MEM_FMT_DESC "word"7879 #include "IEMAllMemRWTmpl.cpp.h"7880 7881 #define TMPL_WITH_PUSH_SREG7882 #define TMPL_MEM_TYPE uint32_t7883 #define TMPL_MEM_FN_SUFF U327884 #define TMPL_MEM_FMT_TYPE "%#010x"7885 #define TMPL_MEM_FMT_DESC "dword"7886 #include "IEMAllMemRWTmpl.cpp.h"7887 #undef TMPL_WITH_PUSH_SREG7888 7889 #define TMPL_MEM_TYPE uint64_t7890 #define TMPL_MEM_FN_SUFF U647891 #define 
TMPL_MEM_FMT_TYPE "%#018RX64"7892 #define TMPL_MEM_FMT_DESC "qword"7893 #include "IEMAllMemRWTmpl.cpp.h"7894 7895 #undef TMPL_MEM_WITH_STACK7896 7897 #define TMPL_MEM_TYPE uint32_t7898 #define TMPL_MEM_TYPE_ALIGN 07899 #define TMPL_MEM_FN_SUFF U32NoAc7900 #define TMPL_MEM_FMT_TYPE "%#010x"7901 #define TMPL_MEM_FMT_DESC "dword"7902 #include "IEMAllMemRWTmpl.cpp.h"7903 #undef TMPL_WITH_PUSH_SREG7904 7905 #define TMPL_MEM_TYPE uint64_t7906 #define TMPL_MEM_TYPE_ALIGN 07907 #define TMPL_MEM_FN_SUFF U64NoAc7908 #define TMPL_MEM_FMT_TYPE "%#018RX64"7909 #define TMPL_MEM_FMT_DESC "qword"7910 #include "IEMAllMemRWTmpl.cpp.h"7911 7912 #define TMPL_MEM_TYPE uint64_t7913 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)7914 #define TMPL_MEM_FN_SUFF U64AlignedU1287915 #define TMPL_MEM_FMT_TYPE "%#018RX64"7916 #define TMPL_MEM_FMT_DESC "qword"7917 #include "IEMAllMemRWTmpl.cpp.h"7918 7919 /* See IEMAllMemRWTmplInline.cpp.h */7920 #define TMPL_MEM_BY_REF7921 7922 #define TMPL_MEM_TYPE RTFLOAT80U7923 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)7924 #define TMPL_MEM_FN_SUFF R807925 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7926 #define TMPL_MEM_FMT_DESC "tword"7927 #include "IEMAllMemRWTmpl.cpp.h"7928 7929 #define TMPL_MEM_TYPE RTPBCD80U7930 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */7931 #define TMPL_MEM_FN_SUFF D807932 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7933 #define TMPL_MEM_FMT_DESC "tword"7934 #include "IEMAllMemRWTmpl.cpp.h"7935 7936 #define TMPL_MEM_TYPE RTUINT128U7937 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7938 #define TMPL_MEM_FN_SUFF U1287939 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7940 #define TMPL_MEM_FMT_DESC "dqword"7941 #include "IEMAllMemRWTmpl.cpp.h"7942 7943 #define TMPL_MEM_TYPE RTUINT128U7944 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7945 #define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)7946 #define TMPL_MEM_FN_SUFF U128AlignedSse7947 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7948 #define TMPL_MEM_FMT_DESC "dqword"7949 #include "IEMAllMemRWTmpl.cpp.h"7950 7951 #define TMPL_MEM_TYPE RTUINT128U7952 #define TMPL_MEM_TYPE_ALIGN 07953 #define TMPL_MEM_FN_SUFF U128NoAc7954 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7955 #define TMPL_MEM_FMT_DESC "dqword"7956 #include "IEMAllMemRWTmpl.cpp.h"7957 7958 #define TMPL_MEM_TYPE RTUINT256U7959 #define TMPL_MEM_TYPE_ALIGN 07960 #define TMPL_MEM_FN_SUFF U256NoAc7961 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7962 #define TMPL_MEM_FMT_DESC "qqword"7963 #include "IEMAllMemRWTmpl.cpp.h"7964 7965 #define TMPL_MEM_TYPE RTUINT256U7966 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)7967 #define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP7968 #define TMPL_MEM_FN_SUFF U256AlignedAvx7969 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7970 #define TMPL_MEM_FMT_DESC "qqword"7971 #include "IEMAllMemRWTmpl.cpp.h"7972 7973 /**7974 * Fetches a data dword and zero extends it to a qword.7975 *7976 * @returns Strict VBox status code.7977 * @param pVCpu The cross context virtual CPU structure of the calling thread.7978 * @param pu64Dst Where to return the qword.7979 * @param iSegReg The index of the segment register to use for7980 * this access. The base and limits are checked.7981 * @param GCPtrMem The address of the guest memory.7982 */7983 VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT7984 {7985 /* The lazy approach for now... 
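For illustration: fetching the dword 0xffffffff through this helper leaves
*pu64Dst == UINT64_C(0x00000000ffffffff), the same zero extension a 32-bit register load performs in
long mode; the (currently unused) S32->U64 variant below sign extends instead.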
*/7986 uint8_t bUnmapInfo;7987 uint32_t const *pu32Src;7988 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,7989 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);7990 if (rc == VINF_SUCCESS)7991 {7992 *pu64Dst = *pu32Src;7993 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);7994 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));7995 }7996 return rc;7997 }7998 7999 8000 #ifdef SOME_UNUSED_FUNCTION8001 /**8002 * Fetches a data dword and sign extends it to a qword.8003 *8004 * @returns Strict VBox status code.8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.8006 * @param pu64Dst Where to return the sign extended value.8007 * @param iSegReg The index of the segment register to use for8008 * this access. The base and limits are checked.8009 * @param GCPtrMem The address of the guest memory.8010 */8011 VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8012 {8013 /* The lazy approach for now... */8014 uint8_t bUnmapInfo;8015 int32_t const *pi32Src;8016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,8017 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);8018 if (rc == VINF_SUCCESS)8019 {8020 *pu64Dst = *pi32Src;8021 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8022 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));8023 }8024 #ifdef __GNUC__ /* warning: GCC may be a royal pain */8025 else8026 *pu64Dst = 0;8027 #endif8028 return rc;8029 }8030 #endif8031 8032 8033 /**8034 * Fetches a descriptor register (lgdt, lidt).8035 *8036 * @returns Strict VBox status code.8037 * @param pVCpu The cross context virtual CPU structure of the calling thread.8038 * @param pcbLimit Where to return the limit.8039 * @param pGCPtrBase Where to return the base.8040 * @param iSegReg The index of the segment register to use for8041 * this access. The base and limits are checked.8042 * @param GCPtrMem The address of the guest memory.8043 * @param enmOpSize The effective operand size.8044 */8045 VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,8046 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT8047 {8048 /*8049 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a8050 * little special:8051 * - The two reads are done separately.8052 * - Operand size override works in 16-bit and 32-bit code, but 64-bit.8053 * - We suspect the 386 to actually commit the limit before the base in8054 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). 
We8055 * don't try emulate this eccentric behavior, because it's not well8056 * enough understood and rather hard to trigger.8057 * - The 486 seems to do a dword limit read when the operand size is 32-bit.8058 */8059 VBOXSTRICTRC rcStrict;8060 if (IEM_IS_64BIT_CODE(pVCpu))8061 {8062 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8063 if (rcStrict == VINF_SUCCESS)8064 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);8065 }8066 else8067 {8068 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */8069 if (enmOpSize == IEMMODE_32BIT)8070 {8071 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)8072 {8073 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8074 if (rcStrict == VINF_SUCCESS)8075 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8076 }8077 else8078 {8079 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);8080 if (rcStrict == VINF_SUCCESS)8081 {8082 *pcbLimit = (uint16_t)uTmp;8083 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8084 }8085 }8086 if (rcStrict == VINF_SUCCESS)8087 *pGCPtrBase = uTmp;8088 }8089 else8090 {8091 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8092 if (rcStrict == VINF_SUCCESS)8093 {8094 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8095 if (rcStrict == VINF_SUCCESS)8096 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);8097 }8098 }8099 }8100 return rcStrict;8101 }8102 8103 8104 /**8105 * Stores a data dqword, SSE aligned.8106 *8107 * @returns Strict VBox status code.8108 * @param pVCpu The cross context virtual CPU structure of the calling thread.8109 * @param iSegReg The index of the segment register to use for8110 * this access. The base and limits are checked.8111 * @param GCPtrMem The address of the guest memory.8112 * @param u128Value The value to store.8113 */8114 VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT8115 {8116 /* The lazy approach for now... */8117 uint8_t bUnmapInfo;8118 PRTUINT128U pu128Dst;8119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8120 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8121 if (rc == VINF_SUCCESS)8122 {8123 pu128Dst->au64[0] = u128Value.au64[0];8124 pu128Dst->au64[1] = u128Value.au64[1];8125 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8126 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8127 }8128 return rc;8129 }8130 8131 8132 #ifdef IEM_WITH_SETJMP8133 /**8134 * Stores a data dqword, SSE aligned.8135 *8136 * @returns Strict VBox status code.8137 * @param pVCpu The cross context virtual CPU structure of the calling thread.8138 * @param iSegReg The index of the segment register to use for8139 * this access. The base and limits are checked.8140 * @param GCPtrMem The address of the guest memory.8141 * @param u128Value The value to store.8142 */8143 void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,8144 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP8145 {8146 /* The lazy approach for now... 
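Clarifying note on the alignment control used below: (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP |
IEM_MEMMAP_F_ALIGN_SSE requests a 16 byte alignment check that normally raises #GP(0); per the alignment
block in iemMemMapJmp it is only downgraded to the #AC handling when MXCSR.MM is set, so a misaligned
aligned-SSE store ordinarily faults with #GP.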
*/8147 uint8_t bUnmapInfo;8148 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8149 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8150 pu128Dst->au64[0] = u128Value.au64[0];8151 pu128Dst->au64[1] = u128Value.au64[1];8152 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8153 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8154 }8155 #endif8156 8157 8158 /**8159 * Stores a data dqword.8160 *8161 * @returns Strict VBox status code.8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.8163 * @param iSegReg The index of the segment register to use for8164 * this access. The base and limits are checked.8165 * @param GCPtrMem The address of the guest memory.8166 * @param pu256Value Pointer to the value to store.8167 */8168 VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT8169 {8170 /* The lazy approach for now... */8171 uint8_t bUnmapInfo;8172 PRTUINT256U pu256Dst;8173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8174 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8175 if (rc == VINF_SUCCESS)8176 {8177 pu256Dst->au64[0] = pu256Value->au64[0];8178 pu256Dst->au64[1] = pu256Value->au64[1];8179 pu256Dst->au64[2] = pu256Value->au64[2];8180 pu256Dst->au64[3] = pu256Value->au64[3];8181 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8182 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8183 }8184 return rc;8185 }8186 8187 8188 #ifdef IEM_WITH_SETJMP8189 /**8190 * Stores a data dqword, longjmp on error.8191 *8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.8193 * @param iSegReg The index of the segment register to use for8194 * this access. The base and limits are checked.8195 * @param GCPtrMem The address of the guest memory.8196 * @param pu256Value Pointer to the value to store.8197 */8198 void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP8199 {8200 /* The lazy approach for now... */8201 uint8_t bUnmapInfo;8202 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8203 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8204 pu256Dst->au64[0] = pu256Value->au64[0];8205 pu256Dst->au64[1] = pu256Value->au64[1];8206 pu256Dst->au64[2] = pu256Value->au64[2];8207 pu256Dst->au64[3] = pu256Value->au64[3];8208 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8209 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8210 }8211 #endif8212 8213 8214 /**8215 * Stores a descriptor register (sgdt, sidt).8216 *8217 * @returns Strict VBox status code.8218 * @param pVCpu The cross context virtual CPU structure of the calling thread.8219 * @param cbLimit The limit.8220 * @param GCPtrBase The base address.8221 * @param iSegReg The index of the segment register to use for8222 * this access. The base and limits are checked.8223 * @param GCPtrMem The address of the guest memory.8224 */8225 VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8226 {8227 /*8228 * The SIDT and SGDT instructions actually stores the data using two8229 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). 
The instructions8230 * does not respond to opsize prefixes.8231 */8232 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);8233 if (rcStrict == VINF_SUCCESS)8234 {8235 if (IEM_IS_16BIT_CODE(pVCpu))8236 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,8237 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_2868238 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);8239 else if (IEM_IS_32BIT_CODE(pVCpu))8240 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);8241 else8242 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);8243 }8244 return rcStrict;8245 }8246 8247 8248 /**8249 * Begin a special stack push (used by interrupt, exceptions and such).8250 *8251 * This will raise \#SS or \#PF if appropriate.8252 *8253 * @returns Strict VBox status code.8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.8255 * @param cbMem The number of bytes to push onto the stack.8256 * @param cbAlign The alignment mask (7, 3, 1).8257 * @param ppvMem Where to return the pointer to the stack memory.8258 * As with the other memory functions this could be8259 * direct access or bounce buffered access, so8260 * don't commit register until the commit call8261 * succeeds.8262 * @param pbUnmapInfo Where to store unmap info for8263 * iemMemStackPushCommitSpecial.8264 * @param puNewRsp Where to return the new RSP value. This must be8265 * passed unchanged to8266 * iemMemStackPushCommitSpecial().8267 */8268 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,8269 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT8270 {8271 Assert(cbMem < UINT8_MAX);8272 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);8273 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);8274 }8275 8276 8277 /**8278 * Commits a special stack push (started by iemMemStackPushBeginSpecial).8279 *8280 * This will update the rSP.8281 *8282 * @returns Strict VBox status code.8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.8284 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.8285 * @param uNewRsp The new RSP value returned by8286 * iemMemStackPushBeginSpecial().8287 */8288 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT8289 {8290 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8291 if (rcStrict == VINF_SUCCESS)8292 pVCpu->cpum.GstCtx.rsp = uNewRsp;8293 return rcStrict;8294 }8295 8296 8297 /**8298 * Begin a special stack pop (used by iret, retf and such).8299 *8300 * This will raise \#SS or \#PF if appropriate.8301 *8302 * @returns Strict VBox status code.8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.8304 * @param cbMem The number of bytes to pop from the stack.8305 * @param cbAlign The alignment mask (7, 3, 1).8306 * @param ppvMem Where to return the pointer to the stack memory.8307 * @param pbUnmapInfo Where to store unmap info for8308 * iemMemStackPopDoneSpecial.8309 * @param puNewRsp Where to return the new RSP value. 
This must be8310 * assigned to CPUMCTX::rsp manually some time8311 * after iemMemStackPopDoneSpecial() has been8312 * called.8313 */8314 VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,8315 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT8316 {8317 Assert(cbMem < UINT8_MAX);8318 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);8319 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);8320 }8321 8322 8323 /**8324 * Continue a special stack pop (used by iret and retf), for the purpose of8325 * retrieving a new stack pointer.8326 *8327 * This will raise \#SS or \#PF if appropriate.8328 *8329 * @returns Strict VBox status code.8330 * @param pVCpu The cross context virtual CPU structure of the calling thread.8331 * @param off Offset from the top of the stack. This is zero8332 * except in the retf case.8333 * @param cbMem The number of bytes to pop from the stack.8334 * @param ppvMem Where to return the pointer to the stack memory.8335 * @param pbUnmapInfo Where to store unmap info for8336 * iemMemStackPopDoneSpecial.8337 * @param uCurNewRsp The current uncommitted RSP value. (No need to8338 * return this because all use of this function is8339 * to retrieve a new value and anything we return8340 * here would be discarded.)8341 */8342 VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,8343 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT8344 {8345 Assert(cbMem < UINT8_MAX);8346 8347 /* The essense of iemRegGetRspForPopEx and friends: */ /** @todo put this into a inlined function? */8348 RTGCPTR GCPtrTop;8349 if (IEM_IS_64BIT_CODE(pVCpu))8350 GCPtrTop = uCurNewRsp;8351 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)8352 GCPtrTop = (uint32_t)uCurNewRsp;8353 else8354 GCPtrTop = (uint16_t)uCurNewRsp;8355 8356 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,8357 0 /* checked in iemMemStackPopBeginSpecial */);8358 }8359 8360 8361 /**8362 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or8363 * iemMemStackPopContinueSpecial).8364 *8365 * The caller will manually commit the rSP.8366 *8367 * @returns Strict VBox status code.8368 * @param pVCpu The cross context virtual CPU structure of the calling thread.8369 * @param bUnmapInfo Unmap information returned by8370 * iemMemStackPopBeginSpecial() or8371 * iemMemStackPopContinueSpecial().8372 */8373 VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT8374 {8375 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8376 }8377 8378 8379 /**8380 * Fetches a system table byte.8381 *8382 * @returns Strict VBox status code.8383 * @param pVCpu The cross context virtual CPU structure of the calling thread.8384 * @param pbDst Where to return the byte.8385 * @param iSegReg The index of the segment register to use for8386 * this access. The base and limits are checked.8387 * @param GCPtrMem The address of the guest memory.8388 */8389 VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8390 {8391 /* The lazy approach for now... 
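Note: uAlignCtl is 0 for these system-table reads because they use IEM_ACCESS_SYS_R; the alignment block
in iemMemMap skips the #AC/#GP handling for IEM_ACCESS_WHAT_SYS accesses anyway, so a non-zero mask would
only ever matter for the atomic/split-lock part, which never applies here.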
*/8392 uint8_t bUnmapInfo;8393 uint8_t const *pbSrc;8394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8395 if (rc == VINF_SUCCESS)8396 {8397 *pbDst = *pbSrc;8398 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8399 }8400 return rc;8401 }8402 8403 8404 /**8405 * Fetches a system table word.8406 *8407 * @returns Strict VBox status code.8408 * @param pVCpu The cross context virtual CPU structure of the calling thread.8409 * @param pu16Dst Where to return the word.8410 * @param iSegReg The index of the segment register to use for8411 * this access. The base and limits are checked.8412 * @param GCPtrMem The address of the guest memory.8413 */8414 VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8415 {8416 /* The lazy approach for now... */8417 uint8_t bUnmapInfo;8418 uint16_t const *pu16Src;8419 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8420 if (rc == VINF_SUCCESS)8421 {8422 *pu16Dst = *pu16Src;8423 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8424 }8425 return rc;8426 }8427 8428 8429 /**8430 * Fetches a system table dword.8431 *8432 * @returns Strict VBox status code.8433 * @param pVCpu The cross context virtual CPU structure of the calling thread.8434 * @param pu32Dst Where to return the dword.8435 * @param iSegReg The index of the segment register to use for8436 * this access. The base and limits are checked.8437 * @param GCPtrMem The address of the guest memory.8438 */8439 VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8440 {8441 /* The lazy approach for now... */8442 uint8_t bUnmapInfo;8443 uint32_t const *pu32Src;8444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8445 if (rc == VINF_SUCCESS)8446 {8447 *pu32Dst = *pu32Src;8448 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8449 }8450 return rc;8451 }8452 8453 8454 /**8455 * Fetches a system table qword.8456 *8457 * @returns Strict VBox status code.8458 * @param pVCpu The cross context virtual CPU structure of the calling thread.8459 * @param pu64Dst Where to return the qword.8460 * @param iSegReg The index of the segment register to use for8461 * this access. The base and limits are checked.8462 * @param GCPtrMem The address of the guest memory.8463 */8464 VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8465 {8466 /* The lazy approach for now... 
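Worked example (illustrative): for selector 0x002b the descriptor fetch below reads the 8 byte entry at
GCPtrBase + (0x002b & X86_SEL_MASK) = GCPtrBase + 0x28, i.e. the low selector bits do not shift the
offset into the GDT/LDT.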
*/8467 uint8_t bUnmapInfo;8468 uint64_t const *pu64Src;8469 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8470 if (rc == VINF_SUCCESS)8471 {8472 *pu64Dst = *pu64Src;8473 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8474 }8475 return rc;8476 }8477 8478 8479 /**8480 * Fetches a descriptor table entry with caller specified error code.8481 *8482 * @returns Strict VBox status code.8483 * @param pVCpu The cross context virtual CPU structure of the calling thread.8484 * @param pDesc Where to return the descriptor table entry.8485 * @param uSel The selector which table entry to fetch.8486 * @param uXcpt The exception to raise on table lookup error.8487 * @param uErrorCode The error code associated with the exception.8488 */8489 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,8490 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT8491 {8492 AssertPtr(pDesc);8493 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);8494 8495 /** @todo did the 286 require all 8 bytes to be accessible? */8496 /*8497 * Get the selector table base and check bounds.8498 */8499 RTGCPTR GCPtrBase;8500 if (uSel & X86_SEL_LDT)8501 {8502 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present8503 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )8504 {8505 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",8506 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));8507 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8508 uErrorCode, 0);8509 }8510 8511 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);8512 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;8513 }8514 else8515 {8516 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)8517 {8518 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));8519 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8520 uErrorCode, 0);8521 }8522 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;8523 }8524 8525 /*8526 * Read the legacy descriptor and maybe the long mode extensions if8527 * required.8528 */8529 VBOXSTRICTRC rcStrict;8530 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)8531 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));8532 else8533 {8534 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);8535 if (rcStrict == VINF_SUCCESS)8536 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);8537 if (rcStrict == VINF_SUCCESS)8538 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);8539 if (rcStrict == VINF_SUCCESS)8540 pDesc->Legacy.au16[3] = 0;8541 else8542 return rcStrict;8543 }8544 8545 if (rcStrict == VINF_SUCCESS)8546 {8547 if ( !IEM_IS_LONG_MODE(pVCpu)8548 || pDesc->Legacy.Gen.u1DescType)8549 pDesc->Long.au64[1] = 0;8550 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 88551 <= (uSel & X86_SEL_LDT ? 
                                 pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
            rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
        else
        {
            LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
            /** @todo is this the right exception? */
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
        }
    }
    return rcStrict;
}


/**
 * Fetches a descriptor table entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pDesc   Where to return the descriptor table entry.
 * @param   uSel    The selector which table entry to fetch.
 * @param   uXcpt   The exception to raise on table lookup error.
 */
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
{
    return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
}


/**
 * Marks the selector descriptor as accessed (only non-system descriptors).
 *
 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
 * will therefore skip the limit checks.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uSel    The selector.
 */
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    /*
     * Get the selector table base and calculate the entry address.
     */
    RTGCPTR GCPtr = uSel & X86_SEL_LDT
                  ? pVCpu->cpum.GstCtx.ldtr.u64Base
                  : pVCpu->cpum.GstCtx.gdtr.pGdt;
    GCPtr += uSel & X86_SEL_MASK;

    /*
     * ASMAtomicBitSet will assert if the address is misaligned, so do some
     * ugly stuff to avoid this.  This will make sure it's an atomic access
     * as well as more or less remove any question about 8-bit or 32-bit accesses.
     */
    VBOXSTRICTRC        rcStrict;
    uint8_t             bUnmapInfo;
    uint32_t volatile  *pu32;
    if ((GCPtr & 3) == 0)
    {
        /* The normal case, map the 32-bit bits around the accessed bit (40). */
        GCPtr += 2 + 2;
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
    }
    else
    {
        /* The misaligned GDT/LDT case, map the whole thing.
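
The accessed-bit trick relies on the descriptor layout: an entry sits at table base plus (selector & ~7), and the accessed flag is bit 40 of the 8-byte descriptor, i.e. bit 8 of the 32-bit word at offset 4. A standalone, non-atomic sketch of that arithmetic follows; the helper names are hypothetical and this is not the VBox code, which uses ASMAtomicBitSet on a mapped guest page.

    #include <cstdint>
    #include <cstring>

    /* The low three selector bits are TI + RPL and are not part of the index. */
    static inline uint64_t descTableOffset(uint16_t uSel)
    {
        return uSel & ~UINT64_C(7);             /* same idea as uSel & X86_SEL_MASK */
    }

    /* Set the accessed flag the way the aligned path does: bit 8 of the dword
       at offset 4 (40 - 32 = 8).  Non-atomic, illustration only. */
    static inline void setAccessedBitNonAtomic(uint8_t *pabDesc /* 8 bytes */)
    {
        uint32_t u32;
        std::memcpy(&u32, pabDesc + 4, sizeof(u32));
        u32 |= UINT32_C(1) << 8;
        std::memcpy(pabDesc + 4, &u32, sizeof(u32));
    }
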
*/8619 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8620 if (rcStrict != VINF_SUCCESS)8621 return rcStrict;8622 switch ((uintptr_t)pu32 & 3)8623 {8624 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;8625 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;8626 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;8627 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;8628 }8629 }8630 8631 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8632 }8633 8634 8635 #undef LOG_GROUP8636 #define LOG_GROUP LOG_GROUP_IEM8637 8638 /** @} */8639 8640 /** @name Opcode Helpers.8641 * @{8642 */8643 8644 /**8645 * Calculates the effective address of a ModR/M memory operand.8646 *8647 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8648 *8649 * @return Strict VBox status code.8650 * @param pVCpu The cross context virtual CPU structure of the calling thread.8651 * @param bRm The ModRM byte.8652 * @param cbImmAndRspOffset - First byte: The size of any immediate8653 * following the effective address opcode bytes8654 * (only for RIP relative addressing).8655 * - Second byte: RSP displacement (for POP [ESP]).8656 * @param pGCPtrEff Where to return the effective address.8657 */8658 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT8659 {8660 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));8661 # define SET_SS_DEF() \8662 do \8663 { \8664 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8665 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8666 } while (0)8667 8668 if (!IEM_IS_64BIT_CODE(pVCpu))8669 {8670 /** @todo Check the effective address size crap! */8671 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8672 {8673 uint16_t u16EffAddr;8674 8675 /* Handle the disp16 form with no registers first. */8676 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8677 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8678 else8679 {8680 /* Get the displacment. */8681 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8682 {8683 case 0: u16EffAddr = 0; break;8684 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8685 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8686 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */8687 }8688 8689 /* Add the base and index registers to the disp. */8690 switch (bRm & X86_MODRM_RM_MASK)8691 {8692 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8693 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8694 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8695 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8696 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8697 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8698 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8699 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8700 }8701 }8702 8703 *pGCPtrEff = u16EffAddr;8704 }8705 else8706 {8707 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8708 uint32_t u32EffAddr;8709 8710 /* Handle the disp32 form with no registers first. */8711 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8712 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);8713 else8714 {8715 /* Get the register (or SIB) value. 
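
The 16-bit branch above is a direct transcription of the classic r/m table: BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP, BX, plus a mod-dependent displacement. A standalone sketch with a hypothetical register struct; as in the code above, the mod==0 && rm==6 pure disp16 form is assumed to be handled separately.

    #include <cstdint>

    struct Regs16 { uint16_t bx, bp, si, di; };   /* hypothetical, not CPUMCTX */

    static uint16_t calcEffAddr16(const Regs16 &r, uint8_t rm, uint16_t u16Disp)
    {
        static const int s_aiBase [8] = { 0, 0, 1, 1, -1, -1,  1,  0 };  /* 0=BX, 1=BP, -1=none */
        static const int s_aiIndex[8] = { 0, 1, 0, 1,  0,  1, -1, -1 };  /* 0=SI, 1=DI, -1=none */
        uint16_t u16 = u16Disp;
        if (s_aiBase [rm & 7] >= 0) u16 += s_aiBase [rm & 7] ? r.bp : r.bx;
        if (s_aiIndex[rm & 7] >= 0) u16 += s_aiIndex[rm & 7] ? r.di : r.si;
        return u16;                               /* 16-bit wrap-around is intentional */
    }
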
*/8716 switch ((bRm & X86_MODRM_RM_MASK))8717 {8718 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8719 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8720 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8721 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8722 case 4: /* SIB */8723 {8724 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8725 8726 /* Get the index and scale it. */8727 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)8728 {8729 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8730 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8731 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8732 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8733 case 4: u32EffAddr = 0; /*none */ break;8734 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;8735 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8736 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8737 IEM_NOT_REACHED_DEFAULT_CASE_RET();8738 }8739 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8740 8741 /* add base */8742 switch (bSib & X86_SIB_BASE_MASK)8743 {8744 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;8745 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;8746 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;8747 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;8748 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8749 case 5:8750 if ((bRm & X86_MODRM_MOD_MASK) != 0)8751 {8752 u32EffAddr += pVCpu->cpum.GstCtx.ebp;8753 SET_SS_DEF();8754 }8755 else8756 {8757 uint32_t u32Disp;8758 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8759 u32EffAddr += u32Disp;8760 }8761 break;8762 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;8763 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;8764 IEM_NOT_REACHED_DEFAULT_CASE_RET();8765 }8766 break;8767 }8768 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;8769 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8770 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8771 IEM_NOT_REACHED_DEFAULT_CASE_RET();8772 }8773 8774 /* Get and add the displacement. */8775 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8776 {8777 case 0:8778 break;8779 case 1:8780 {8781 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);8782 u32EffAddr += i8Disp;8783 break;8784 }8785 case 2:8786 {8787 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);8788 u32EffAddr += u32Disp;8789 break;8790 }8791 default:8792 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */8793 }8794 8795 }8796 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8797 *pGCPtrEff = u32EffAddr;8798 }8799 }8800 else8801 {8802 uint64_t u64EffAddr;8803 8804 /* Handle the rip+disp32 form with no registers first. */8805 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8806 {8807 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);8808 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));8809 }8810 else8811 {8812 /* Get the register (or SIB) value. 
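
The SIB handling above boils down to base + (index << scale) + displacement, with index 4 meaning "no index". A compact standalone sketch; aGprs32 is a hypothetical EAX..EDI array, and the mod==0 && base==5 "disp32 instead of EBP" case plus the ESP/EBP stack-segment defaults are assumed to be handled by the caller, exactly as the switch above does.

    #include <cstdint>

    static uint32_t calcSibEffAddr32(const uint32_t aGprs32[8], uint8_t bSib, uint32_t u32Disp)
    {
        uint8_t const iScale = bSib >> 6;             /* bits 7:6 */
        uint8_t const iIndex = (bSib >> 3) & 7;       /* bits 5:3 */
        uint8_t const iBase  = bSib & 7;              /* bits 2:0 */

        uint32_t u32 = iIndex == 4 ? 0 : aGprs32[iIndex] << iScale;  /* index 4 = none */
        u32 += aGprs32[iBase];
        return u32 + u32Disp;                         /* 32-bit wrap-around is intentional */
    }
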
*/8813 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)8814 {8815 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8816 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8817 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8818 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8819 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;8820 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8821 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8822 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8823 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8824 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8825 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8826 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8827 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8828 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8829 /* SIB */8830 case 4:8831 case 12:8832 {8833 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8834 8835 /* Get the index and scale it. */8836 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)8837 {8838 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8839 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8840 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8841 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8842 case 4: u64EffAddr = 0; /*none */ break;8843 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;8844 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8845 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8846 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8847 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8848 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8849 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8850 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;8851 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8852 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8853 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8854 IEM_NOT_REACHED_DEFAULT_CASE_RET();8855 }8856 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8857 8858 /* add base */8859 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)8860 {8861 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;8862 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;8863 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;8864 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;8865 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8866 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;8867 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;8868 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;8869 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;8870 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;8871 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;8872 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;8873 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;8874 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;8875 /* complicated encodings */8876 case 5:8877 case 13:8878 if ((bRm & X86_MODRM_MOD_MASK) != 0)8879 {8880 if (!pVCpu->iem.s.uRexB)8881 {8882 u64EffAddr += pVCpu->cpum.GstCtx.rbp;8883 SET_SS_DEF();8884 }8885 else8886 u64EffAddr += pVCpu->cpum.GstCtx.r13;8887 }8888 else8889 {8890 uint32_t u32Disp;8891 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8892 u64EffAddr += (int32_t)u32Disp;8893 }8894 break;8895 IEM_NOT_REACHED_DEFAULT_CASE_RET();8896 }8897 break;8898 }8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();8900 }8901 8902 
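
Two details of the 64-bit path above, reduced to standalone helpers with hypothetical names: RIP-relative addressing is defined against the end of the instruction, which is why the code adds IEM_GET_INSTR_LEN() plus the size of any not-yet-decoded immediate (the low byte of cbImmAndRspOffset), and the REX bits simply supply bit 3 of a register number.

    #include <cstdint>

    static uint64_t calcRipRelative(uint64_t uRipOfInstr, uint8_t cbInstrTotal, int32_t i32Disp)
    {
        return uRipOfInstr + cbInstrTotal + (int64_t)i32Disp;   /* disp32 is sign-extended */
    }

    /* REX.B / REX.X / REX.R contribute bit 3 of the register number. */
    static uint8_t extendRegNum(uint8_t iRegLow3, bool fRexBit)
    {
        return (uint8_t)(iRegLow3 | (fRexBit ? 8 : 0));
    }
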
/* Get and add the displacement. */8903 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8904 {8905 case 0:8906 break;8907 case 1:8908 {8909 int8_t i8Disp;8910 IEM_OPCODE_GET_NEXT_S8(&i8Disp);8911 u64EffAddr += i8Disp;8912 break;8913 }8914 case 2:8915 {8916 uint32_t u32Disp;8917 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8918 u64EffAddr += (int32_t)u32Disp;8919 break;8920 }8921 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */8922 }8923 8924 }8925 8926 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)8927 *pGCPtrEff = u64EffAddr;8928 else8929 {8930 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8931 *pGCPtrEff = u64EffAddr & UINT32_MAX;8932 }8933 }8934 8935 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));8936 return VINF_SUCCESS;8937 }8938 8939 8940 #ifdef IEM_WITH_SETJMP8941 /**8942 * Calculates the effective address of a ModR/M memory operand.8943 *8944 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8945 *8946 * May longjmp on internal error.8947 *8948 * @return The effective address.8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.8950 * @param bRm The ModRM byte.8951 * @param cbImmAndRspOffset - First byte: The size of any immediate8952 * following the effective address opcode bytes8953 * (only for RIP relative addressing).8954 * - Second byte: RSP displacement (for POP [ESP]).8955 */8956 RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP8957 {8958 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));8959 # define SET_SS_DEF() \8960 do \8961 { \8962 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8963 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8964 } while (0)8965 8966 if (!IEM_IS_64BIT_CODE(pVCpu))8967 {8968 /** @todo Check the effective address size crap! */8969 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8970 {8971 uint16_t u16EffAddr;8972 8973 /* Handle the disp16 form with no registers first. */8974 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8975 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8976 else8977 {8978 /* Get the displacment. */8979 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8980 {8981 case 0: u16EffAddr = 0; break;8982 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8983 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8984 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */8985 }8986 8987 /* Add the base and index registers to the disp. */8988 switch (bRm & X86_MODRM_RM_MASK)8989 {8990 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8991 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8992 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8993 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8994 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8995 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8996 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8997 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8998 }8999 }9000 9001 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));9002 return u16EffAddr;9003 }9004 9005 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9006 uint32_t u32EffAddr;9007 9008 /* Handle the disp32 form with no registers first. 
*/9009 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9010 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9011 else9012 {9013 /* Get the register (or SIB) value. */9014 switch ((bRm & X86_MODRM_RM_MASK))9015 {9016 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9017 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9018 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9019 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9020 case 4: /* SIB */9021 {9022 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9023 9024 /* Get the index and scale it. */9025 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9026 {9027 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9028 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9029 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9030 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9031 case 4: u32EffAddr = 0; /*none */ break;9032 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9033 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9034 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9035 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9036 }9037 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9038 9039 /* add base */9040 switch (bSib & X86_SIB_BASE_MASK)9041 {9042 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9043 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9044 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9045 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9046 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9047 case 5:9048 if ((bRm & X86_MODRM_MOD_MASK) != 0)9049 {9050 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9051 SET_SS_DEF();9052 }9053 else9054 {9055 uint32_t u32Disp;9056 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9057 u32EffAddr += u32Disp;9058 }9059 break;9060 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9061 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9062 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9063 }9064 break;9065 }9066 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9067 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9068 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9069 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9070 }9071 9072 /* Get and add the displacement. */9073 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9074 {9075 case 0:9076 break;9077 case 1:9078 {9079 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9080 u32EffAddr += i8Disp;9081 break;9082 }9083 case 2:9084 {9085 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9086 u32EffAddr += u32Disp;9087 break;9088 }9089 default:9090 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */9091 }9092 }9093 9094 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9095 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));9096 return u32EffAddr;9097 }9098 9099 uint64_t u64EffAddr;9100 9101 /* Handle the rip+disp32 form with no registers first. */9102 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9103 {9104 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9105 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9106 }9107 else9108 {9109 /* Get the register (or SIB) value. 
*/9110 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9111 {9112 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9113 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9114 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9115 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9116 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9117 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9118 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9119 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9120 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9121 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9122 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9123 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9124 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9125 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9126 /* SIB */9127 case 4:9128 case 12:9129 {9130 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9131 9132 /* Get the index and scale it. */9133 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9134 {9135 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9136 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9137 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9138 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9139 case 4: u64EffAddr = 0; /*none */ break;9140 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9141 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9142 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9143 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9144 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9145 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9146 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9147 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9148 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9149 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9150 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9151 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9152 }9153 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9154 9155 /* add base */9156 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9157 {9158 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9159 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9160 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9161 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9162 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9163 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9164 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9165 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9166 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9167 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9168 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9169 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9170 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9171 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9172 /* complicated encodings */9173 case 5:9174 case 13:9175 if ((bRm & X86_MODRM_MOD_MASK) != 0)9176 {9177 if (!pVCpu->iem.s.uRexB)9178 {9179 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9180 SET_SS_DEF();9181 }9182 else9183 u64EffAddr += pVCpu->cpum.GstCtx.r13;9184 }9185 else9186 {9187 uint32_t u32Disp;9188 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9189 u64EffAddr += (int32_t)u32Disp;9190 }9191 break;9192 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9193 }9194 break;9195 }9196 
IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9197 }9198 9199 /* Get and add the displacement. */9200 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9201 {9202 case 0:9203 break;9204 case 1:9205 {9206 int8_t i8Disp;9207 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9208 u64EffAddr += i8Disp;9209 break;9210 }9211 case 2:9212 {9213 uint32_t u32Disp;9214 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9215 u64EffAddr += (int32_t)u32Disp;9216 break;9217 }9218 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */9219 }9220 9221 }9222 9223 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9224 {9225 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));9226 return u64EffAddr;9227 }9228 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9229 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));9230 return u64EffAddr & UINT32_MAX;9231 }9232 #endif /* IEM_WITH_SETJMP */9233 9234 9235 /**9236 * Calculates the effective address of a ModR/M memory operand, extended version9237 * for use in the recompilers.9238 *9239 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.9240 *9241 * @return Strict VBox status code.9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.9243 * @param bRm The ModRM byte.9244 * @param cbImmAndRspOffset - First byte: The size of any immediate9245 * following the effective address opcode bytes9246 * (only for RIP relative addressing).9247 * - Second byte: RSP displacement (for POP [ESP]).9248 * @param pGCPtrEff Where to return the effective address.9249 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and9250 * SIB byte (bits 39:32).9251 */9252 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT9253 {9254 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));9255 # define SET_SS_DEF() \9256 do \9257 { \9258 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \9259 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \9260 } while (0)9261 9262 uint64_t uInfo;9263 if (!IEM_IS_64BIT_CODE(pVCpu))9264 {9265 /** @todo Check the effective address size crap! */9266 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)9267 {9268 uint16_t u16EffAddr;9269 9270 /* Handle the disp16 form with no registers first. */9271 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)9272 {9273 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);9274 uInfo = u16EffAddr;9275 }9276 else9277 {9278 /* Get the displacment. */9279 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9280 {9281 case 0: u16EffAddr = 0; break;9282 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;9283 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;9284 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */9285 }9286 uInfo = u16EffAddr;9287 9288 /* Add the base and index registers to the disp. 
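
The ...Ex variant documented above returns, besides the effective address, a packed uInfo value with the 32-bit displacement in bits 31:0 and the raw SIB byte in bits 39:32 for the recompilers. A tiny sketch of that packing and unpacking, with hypothetical helper names:

    #include <cstdint>

    static inline uint64_t packEaInfo(uint32_t u32Disp, uint8_t bSib)
    {
        return (uint64_t)bSib << 32 | u32Disp;      /* SIB in bits 39:32, disp in bits 31:0 */
    }

    static inline uint32_t eaInfoDisp(uint64_t uInfo) { return (uint32_t)uInfo; }
    static inline uint8_t  eaInfoSib (uint64_t uInfo) { return (uint8_t)(uInfo >> 32); }
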
*/9289 switch (bRm & X86_MODRM_RM_MASK)9290 {9291 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;9292 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;9293 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;9294 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;9295 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;9296 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;9297 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;9298 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;9299 }9300 }9301 9302 *pGCPtrEff = u16EffAddr;9303 }9304 else9305 {9306 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9307 uint32_t u32EffAddr;9308 9309 /* Handle the disp32 form with no registers first. */9310 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9311 {9312 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9313 uInfo = u32EffAddr;9314 }9315 else9316 {9317 /* Get the register (or SIB) value. */9318 uInfo = 0;9319 switch ((bRm & X86_MODRM_RM_MASK))9320 {9321 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9322 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9323 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9324 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9325 case 4: /* SIB */9326 {9327 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9328 uInfo = (uint64_t)bSib << 32;9329 9330 /* Get the index and scale it. */9331 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9332 {9333 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9334 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9335 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9336 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9337 case 4: u32EffAddr = 0; /*none */ break;9338 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9339 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9340 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9341 IEM_NOT_REACHED_DEFAULT_CASE_RET();9342 }9343 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9344 9345 /* add base */9346 switch (bSib & X86_SIB_BASE_MASK)9347 {9348 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9349 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9350 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9351 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9352 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9353 case 5:9354 if ((bRm & X86_MODRM_MOD_MASK) != 0)9355 {9356 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9357 SET_SS_DEF();9358 }9359 else9360 {9361 uint32_t u32Disp;9362 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9363 u32EffAddr += u32Disp;9364 uInfo |= u32Disp;9365 }9366 break;9367 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9368 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9369 IEM_NOT_REACHED_DEFAULT_CASE_RET();9370 }9371 break;9372 }9373 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9374 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9375 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9376 IEM_NOT_REACHED_DEFAULT_CASE_RET();9377 }9378 9379 /* Get and add the displacement. 
*/9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9381 {9382 case 0:9383 break;9384 case 1:9385 {9386 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9387 u32EffAddr += i8Disp;9388 uInfo |= (uint32_t)(int32_t)i8Disp;9389 break;9390 }9391 case 2:9392 {9393 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9394 u32EffAddr += u32Disp;9395 uInfo |= (uint32_t)u32Disp;9396 break;9397 }9398 default:9399 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */9400 }9401 9402 }9403 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9404 *pGCPtrEff = u32EffAddr;9405 }9406 }9407 else9408 {9409 uint64_t u64EffAddr;9410 9411 /* Handle the rip+disp32 form with no registers first. */9412 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9413 {9414 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9415 uInfo = (uint32_t)u64EffAddr;9416 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9417 }9418 else9419 {9420 /* Get the register (or SIB) value. */9421 uInfo = 0;9422 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9423 {9424 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9425 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9426 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9427 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9428 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9429 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9430 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9431 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9432 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9433 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9434 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9435 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9436 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9437 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9438 /* SIB */9439 case 4:9440 case 12:9441 {9442 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9443 uInfo = (uint64_t)bSib << 32;9444 9445 /* Get the index and scale it. 
*/9446 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9447 {9448 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9449 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9450 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9451 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9452 case 4: u64EffAddr = 0; /*none */ break;9453 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9454 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9455 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9456 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9457 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9458 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9459 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9460 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9461 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9462 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9463 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9464 IEM_NOT_REACHED_DEFAULT_CASE_RET();9465 }9466 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9467 9468 /* add base */9469 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9470 {9471 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9472 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9473 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9474 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9475 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9476 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9477 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9478 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9479 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9480 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9481 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9482 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9483 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9484 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9485 /* complicated encodings */9486 case 5:9487 case 13:9488 if ((bRm & X86_MODRM_MOD_MASK) != 0)9489 {9490 if (!pVCpu->iem.s.uRexB)9491 {9492 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9493 SET_SS_DEF();9494 }9495 else9496 u64EffAddr += pVCpu->cpum.GstCtx.r13;9497 }9498 else9499 {9500 uint32_t u32Disp;9501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9502 u64EffAddr += (int32_t)u32Disp;9503 uInfo |= u32Disp;9504 }9505 break;9506 IEM_NOT_REACHED_DEFAULT_CASE_RET();9507 }9508 break;9509 }9510 IEM_NOT_REACHED_DEFAULT_CASE_RET();9511 }9512 9513 /* Get and add the displacement. 
*/9514 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9515 {9516 case 0:9517 break;9518 case 1:9519 {9520 int8_t i8Disp;9521 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9522 u64EffAddr += i8Disp;9523 uInfo |= (uint32_t)(int32_t)i8Disp;9524 break;9525 }9526 case 2:9527 {9528 uint32_t u32Disp;9529 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9530 u64EffAddr += (int32_t)u32Disp;9531 uInfo |= u32Disp;9532 break;9533 }9534 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */9535 }9536 9537 }9538 9539 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9540 *pGCPtrEff = u64EffAddr;9541 else9542 {9543 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9544 *pGCPtrEff = u64EffAddr & UINT32_MAX;9545 }9546 }9547 *puInfo = uInfo;9548 9549 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));9550 return VINF_SUCCESS;9551 }9552 9553 /** @} */9554 9555 9556 #ifdef LOG_ENABLED9557 /**9558 * Logs the current instruction.9559 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9560 * @param fSameCtx Set if we have the same context information as the VMM,9561 * clear if we may have already executed an instruction in9562 * our debug context. When clear, we assume IEMCPU holds9563 * valid CPU mode info.9564 *9565 * The @a fSameCtx parameter is now misleading and obsolete.9566 * @param pszFunction The IEM function doing the execution.9567 */9568 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT9569 {9570 # ifdef IN_RING39571 if (LogIs2Enabled())9572 {9573 char szInstr[256];9574 uint32_t cbInstr = 0;9575 if (fSameCtx)9576 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,9577 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,9578 szInstr, sizeof(szInstr), &cbInstr);9579 else9580 {9581 uint32_t fFlags = 0;9582 switch (IEM_GET_CPU_MODE(pVCpu))9583 {9584 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;9585 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;9586 case IEMMODE_16BIT:9587 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)9588 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;9589 else9590 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;9591 break;9592 }9593 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,9594 szInstr, sizeof(szInstr), &cbInstr);9595 }9596 9597 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;9598 Log2(("**** %s fExec=%x\n"9599 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"9600 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"9601 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"9602 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"9603 " %s\n"9604 , pszFunction, pVCpu->iem.s.fExec,9605 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,9606 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,9607 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,9608 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,9609 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,9610 szInstr));9611 9612 /* This stuff sucks atm. as it fills the log with MSRs. 
*/9613 //if (LogIs3Enabled())9614 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);9615 }9616 else9617 # endif9618 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,9619 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));9620 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);9621 }9622 #endif /* LOG_ENABLED */9623 9624 9625 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9626 /**9627 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,9628 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.9629 *9630 * @returns Modified rcStrict.9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.9632 * @param rcStrict The instruction execution status.9633 */9634 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT9635 {9636 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));9637 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))9638 {9639 /* VMX preemption timer takes priority over NMI-window exits. */9640 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))9641 {9642 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);9643 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));9644 }9645 /*9646 * Check remaining intercepts.9647 *9648 * NMI-window and Interrupt-window VM-exits.9649 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.9650 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.9651 *9652 * See Intel spec. 26.7.6 "NMI-Window Exiting".9653 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".9654 */9655 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)9656 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9657 && !TRPMHasTrap(pVCpu))9658 {9659 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));9660 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)9661 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))9662 {9663 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);9664 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));9665 }9666 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)9667 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))9668 {9669 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);9670 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));9671 }9672 }9673 }9674 /* TPR-below threshold/APIC write has the highest priority. */9675 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))9676 {9677 rcStrict = iemVmxApicWriteEmulation(pVCpu);9678 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9679 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));9680 }9681 /* MTF takes priority over VMX-preemption timer. 
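
iemHandleNestedInstructionBoundaryFFs above encodes a fixed priority: APIC-write emulation first, then MTF, then the VMX preemption timer, and only then the NMI-window and interrupt-window exits, which are additionally suppressed while an interrupt shadow or a pending TRPM event exists. The sketch below is a simplified standalone model of that ordering; the flags and names are hypothetical, and the virtual-NMI / virtual-interrupt sub-conditions are left out.

    #include <cstdint>

    enum class BoundaryAction { None, ApicWrite, Mtf, PreemptTimer, NmiWindow, IntWindow };

    static const uint32_t FF_APIC_WRITE = 1u << 0;
    static const uint32_t FF_MTF        = 1u << 1;
    static const uint32_t FF_PREEMPT    = 1u << 2;
    static const uint32_t FF_NMI_WIN    = 1u << 3;
    static const uint32_t FF_INT_WIN    = 1u << 4;

    /* Mirrors the if/else chain above: the first matching test wins. */
    static BoundaryAction pickBoundaryAction(uint32_t fFlags, bool fInIntShadow, bool fEventPending)
    {
        if (fFlags & FF_APIC_WRITE)
            return BoundaryAction::ApicWrite;
        if (fFlags & FF_MTF)
            return BoundaryAction::Mtf;
        if (fFlags & FF_PREEMPT)
            return BoundaryAction::PreemptTimer;
        if (!fInIntShadow && !fEventPending)
        {
            if (fFlags & FF_NMI_WIN)
                return BoundaryAction::NmiWindow;
            if (fFlags & FF_INT_WIN)
                return BoundaryAction::IntWindow;
        }
        return BoundaryAction::None;
    }
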
*/9682 else9683 {9684 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);9685 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9686 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));9687 }9688 return rcStrict;9689 }9690 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */9691 9692 9693 /**9694 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,9695 * IEMExecOneBypass and friends.9696 *9697 * Similar code is found in IEMExecLots.9698 *9699 * @return Strict VBox status code.9700 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9701 * @param fExecuteInhibit If set, execute the instruction following CLI,9702 * POP SS and MOV SS,GR.9703 * @param pszFunction The calling function name.9704 */9705 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)9706 {9707 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9708 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9709 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9710 RT_NOREF_PV(pszFunction);9711 9712 #ifdef IEM_WITH_SETJMP9713 VBOXSTRICTRC rcStrict;9714 IEM_TRY_SETJMP(pVCpu, rcStrict)9715 {9716 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9717 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9718 }9719 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9720 {9721 pVCpu->iem.s.cLongJumps++;9722 }9723 IEM_CATCH_LONGJMP_END(pVCpu);9724 #else9725 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9726 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9727 #endif9728 if (rcStrict == VINF_SUCCESS)9729 pVCpu->iem.s.cInstructions++;9730 if (pVCpu->iem.s.cActiveMappings > 0)9731 {9732 Assert(rcStrict != VINF_SUCCESS);9733 iemMemRollback(pVCpu);9734 }9735 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9736 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9737 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9738 9739 //#ifdef DEBUG9740 // AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));9741 //#endif9742 9743 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9744 /*9745 * Perform any VMX nested-guest instruction boundary actions.9746 *9747 * If any of these causes a VM-exit, we must skip executing the next9748 * instruction (would run into stale page tables). A VM-exit makes sure9749 * there is no interrupt-inhibition, so that should ensure we don't go9750 * to try execute the next instruction. 
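
The setjmp-based dispatch used by iemExecOneInner above (IEM_TRY_SETJMP, FNIEMOP_CALL through the one-byte map, rollback of active mappings on failure) can be modelled in miniature. This is a heavily simplified, hypothetical sketch, not the IEM macros: a decoder helper longjmp()s on a fault instead of threading a status code through every call, and the caller rolls back any partially committed memory mappings.

    #include <csetjmp>
    #include <cstdint>

    static thread_local std::jmp_buf g_JmpBuf;
    static thread_local int          g_rcLongJmp;

    static void raiseFault(int rc)
    {
        g_rcLongJmp = rc;
        std::longjmp(g_JmpBuf, 1);
    }

    typedef int (*PFNOPCODE)(void);                 /* stand-in for FNIEMOP            */
    static int opNop(void) { return 0; }
    static int opUd (void) { raiseFault(-22); return 0; /* not reached */ }
    static const PFNOPCODE g_apfnOneByteMap[2] = { opNop, opUd };

    static int executeOne(uint8_t bOpcode, unsigned *pcActiveMappings)
    {
        int rc;
        if (setjmp(g_JmpBuf) == 0)
            rc = g_apfnOneByteMap[bOpcode & 1]();   /* normal return path              */
        else
            rc = g_rcLongJmp;                       /* fault path via longjmp          */
        if (rc != 0 && *pcActiveMappings > 0)
            *pcActiveMappings = 0;                  /* stands in for iemMemRollback()  */
        return rc;
    }
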
Clearing fExecuteInhibit is9751 * problematic because of the setjmp/longjmp clobbering above.9752 */9753 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9754 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)9755 || rcStrict != VINF_SUCCESS)9756 { /* likely */ }9757 else9758 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9759 #endif9760 9761 /* Execute the next instruction as well if a cli, pop ss or9762 mov ss, Gr has just completed successfully. */9763 if ( fExecuteInhibit9764 && rcStrict == VINF_SUCCESS9765 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))9766 {9767 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));9768 if (rcStrict == VINF_SUCCESS)9769 {9770 #ifdef LOG_ENABLED9771 iemLogCurInstr(pVCpu, false, pszFunction);9772 #endif9773 #ifdef IEM_WITH_SETJMP9774 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)9775 {9776 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9777 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9778 }9779 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9780 {9781 pVCpu->iem.s.cLongJumps++;9782 }9783 IEM_CATCH_LONGJMP_END(pVCpu);9784 #else9785 IEM_OPCODE_GET_FIRST_U8(&b);9786 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9787 #endif9788 if (rcStrict == VINF_SUCCESS)9789 {9790 pVCpu->iem.s.cInstructions++;9791 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9792 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9793 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))9794 { /* likely */ }9795 else9796 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9797 #endif9798 }9799 if (pVCpu->iem.s.cActiveMappings > 0)9800 {9801 Assert(rcStrict != VINF_SUCCESS);9802 iemMemRollback(pVCpu);9803 }9804 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9805 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9806 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9807 }9808 else if (pVCpu->iem.s.cActiveMappings > 0)9809 iemMemRollback(pVCpu);9810 /** @todo drop this after we bake this change into RIP advancing. */9811 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... 
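
The execute-inhibit handling above deals with the x86 interrupt shadow: STI, POP SS and MOV SS block interrupt delivery until one more instruction has retired, which is why iemExecOneInner runs a second instruction while CPUMIsInInterruptShadow() holds. A minimal standalone model of that state, with hypothetical names:

    #include <cstdint>

    struct TinyCpu
    {
        bool fIF;               /* EFLAGS.IF                   */
        bool fInhibitOneInstr;  /* the one-instruction shadow  */
    };

    static bool canDeliverInterrupt(const TinyCpu &cpu)
    {
        return cpu.fIF && !cpu.fInhibitOneInstr;
    }

    static void retireInstruction(TinyCpu &cpu, bool fInstrArmsShadow)
    {
        cpu.fInhibitOneInstr = fInstrArmsShadow;    /* a new STI/MOV SS re-arms it */
    }
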
*/9812 }9813 9814 /*9815 * Return value fiddling, statistics and sanity assertions.9816 */9817 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);9818 9819 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));9820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));9821 return rcStrict;9822 }9823 9824 9825 /**9826 * Execute one instruction.9827 *9828 * @return Strict VBox status code.9829 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9830 */9831 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)9832 {9833 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */9834 #ifdef LOG_ENABLED9835 iemLogCurInstr(pVCpu, true, "IEMExecOne");9836 #endif9837 9838 /*9839 * Do the decoding and emulation.9840 */9841 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9842 if (rcStrict == VINF_SUCCESS)9843 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");9844 else if (pVCpu->iem.s.cActiveMappings > 0)9845 iemMemRollback(pVCpu);9846 9847 if (rcStrict != VINF_SUCCESS)9848 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9850 return rcStrict;9851 }9852 9853 9854 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9855 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9856 {9857 VBOXSTRICTRC rcStrict;9858 if ( cbOpcodeBytes9859 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9860 {9861 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);9862 #ifdef IEM_WITH_CODE_TLB9863 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9864 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9865 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9866 pVCpu->iem.s.offCurInstrStart = 0;9867 pVCpu->iem.s.offInstrNextByte = 0;9868 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9869 #else9870 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9871 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9872 #endif9873 rcStrict = VINF_SUCCESS;9874 }9875 else9876 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9877 if (rcStrict == VINF_SUCCESS)9878 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");9879 else if (pVCpu->iem.s.cActiveMappings > 0)9880 iemMemRollback(pVCpu);9881 9882 return rcStrict;9883 }9884 9885 9886 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)9887 {9888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9889 if (rcStrict == VINF_SUCCESS)9890 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");9891 else if (pVCpu->iem.s.cActiveMappings > 0)9892 iemMemRollback(pVCpu);9893 9894 return rcStrict;9895 }9896 9897 9898 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9899 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9900 {9901 VBOXSTRICTRC rcStrict;9902 if ( cbOpcodeBytes9903 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9904 {9905 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);9906 #ifdef IEM_WITH_CODE_TLB9907 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9908 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9909 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9910 
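
IEMExecOneWithPrefetchedByPC above takes a fast path when the caller already has the opcode bytes for the current RIP: it seeds the decoder buffer directly and skips the guest-memory prefetch. A standalone sketch of that idea; MiniDecoder and the 16-byte buffer are hypothetical, while the real code feeds either the code TLB buffer or abOpcode.

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    struct MiniDecoder
    {
        uint8_t abOpcode[16];
        uint8_t cbOpcode;
    };

    static bool seedFromPrefetched(MiniDecoder &dec, uint64_t uCurrentRip, uint64_t uBytesPc,
                                   const void *pvBytes, size_t cbBytes)
    {
        if (cbBytes == 0 || uCurrentRip != uBytesPc)
            return false;                               /* caller must prefetch normally */
        dec.cbOpcode = (uint8_t)std::min(cbBytes, sizeof(dec.abOpcode));
        std::memcpy(dec.abOpcode, pvBytes, dec.cbOpcode);
        return true;
    }
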
pVCpu->iem.s.offCurInstrStart = 0;9911 pVCpu->iem.s.offInstrNextByte = 0;9912 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9913 #else9914 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9915 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9916 #endif9917 rcStrict = VINF_SUCCESS;9918 }9919 else9920 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9921 if (rcStrict == VINF_SUCCESS)9922 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");9923 else if (pVCpu->iem.s.cActiveMappings > 0)9924 iemMemRollback(pVCpu);9925 9926 return rcStrict;9927 }9928 9929 9930 /**9931 * For handling split cacheline lock operations when the host has split-lock9932 * detection enabled.9933 *9934 * This will cause the interpreter to disregard the lock prefix and implicit9935 * locking (xchg).9936 *9937 * @returns Strict VBox status code.9938 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9939 */9940 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)9941 {9942 /*9943 * Do the decoding and emulation.9944 */9945 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);9946 if (rcStrict == VINF_SUCCESS)9947 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");9948 else if (pVCpu->iem.s.cActiveMappings > 0)9949 iemMemRollback(pVCpu);9950 9951 if (rcStrict != VINF_SUCCESS)9952 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9953 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9954 return rcStrict;9955 }9956 9957 9958 /**9959 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to9960 * inject a pending TRPM trap.9961 */9962 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)9963 {9964 Assert(TRPMHasTrap(pVCpu));9965 9966 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9967 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))9968 {9969 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */9970 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)9971 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);9972 if (fIntrEnabled)9973 {9974 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))9975 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9976 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))9977 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));9978 else9979 {9980 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));9981 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));9982 }9983 }9984 #else9985 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9986 #endif9987 if (fIntrEnabled)9988 {9989 uint8_t u8TrapNo;9990 TRPMEVENT enmType;9991 uint32_t uErrCode;9992 RTGCPTR uCr2;9993 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);9994 AssertRC(rc2);9995 Assert(enmType == TRPM_HARDWARE_INT);9996 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);9997 9998 TRPMResetTrap(pVCpu);9999 10000 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10001 /* Injecting an event may cause a VM-exit. 
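
iemExecInjectPendingTrap above gates injection of a pending external interrupt on several conditions: no interrupt shadow, no NMI-induced inhibition, GIF set, and either plain EFLAGS.IF or the nested-guest view of "physical interrupts enabled". A condensed standalone model; all names are hypothetical, and the single nested flag stands in for the VMX/SVM physical-interrupt checks.

    enum class GuestMode { Normal, VmxNonRoot, SvmNested };

    struct IntrState
    {
        bool fInIntShadow;       /* block-by-STI / MOV SS            */
        bool fNmiInhibited;      /* interrupts inhibited by NMI      */
        bool fGif;               /* global interrupt flag            */
        bool fEflagsIf;          /* guest EFLAGS.IF                  */
        bool fNestedPhysIntrOk;  /* nested-guest "physical INTs on"  */
    };

    static bool canInjectExtInt(const IntrState &s, GuestMode enmMode)
    {
        if (s.fInIntShadow || s.fNmiInhibited || !s.fGif)
            return false;
        if (enmMode == GuestMode::Normal)
            return s.fEflagsIf;
        return s.fNestedPhysIntrOk;     /* VMX non-root / SVM nested guest */
    }
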
*/10002 if ( rcStrict != VINF_SUCCESS10003 && rcStrict != VINF_IEM_RAISED_XCPT)10004 return iemExecStatusCodeFiddling(pVCpu, rcStrict);10005 #else10006 NOREF(rcStrict);10007 #endif10008 }10009 }10010 10011 return VINF_SUCCESS;10012 }10013 10014 10015 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)10016 {10017 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;10018 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));10019 Assert(cMaxInstructions > 0);10020 10021 /*10022 * See if there is an interrupt pending in TRPM, inject it if we can.10023 */10024 /** @todo What if we are injecting an exception and not an interrupt? Is that10025 * possible here? For now we assert it is indeed only an interrupt. */10026 if (!TRPMHasTrap(pVCpu))10027 { /* likely */ }10028 else10029 {10030 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);10031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10032 { /*likely */ }10033 else10034 return rcStrict;10035 }10036 10037 /*10038 * Initial decoder init w/ prefetch, then setup setjmp.10039 */10040 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10041 if (rcStrict == VINF_SUCCESS)10042 {10043 #ifdef IEM_WITH_SETJMP10044 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */10045 IEM_TRY_SETJMP(pVCpu, rcStrict)10046 #endif10047 {10048 /*10049 * The run loop. We limit ourselves to 4096 instructions right now.10050 */10051 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;10052 PVMCC pVM = pVCpu->CTX_SUFF(pVM);10053 for (;;)10054 {10055 /*10056 * Log the state.10057 */10058 #ifdef LOG_ENABLED10059 iemLogCurInstr(pVCpu, true, "IEMExecLots");10060 #endif10061 10062 /*10063 * Do the decoding and emulation.10064 */10065 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10066 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10067 #ifdef VBOX_STRICT10068 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);10069 #endif10070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10071 {10072 Assert(pVCpu->iem.s.cActiveMappings == 0);10073 pVCpu->iem.s.cInstructions++;10074 10075 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10076 /* Perform any VMX nested-guest instruction boundary actions. */10077 uint64_t fCpu = pVCpu->fLocalForcedActions;10078 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10079 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10080 { /* likely */ }10081 else10082 {10083 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10084 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10085 fCpu = pVCpu->fLocalForcedActions;10086 else10087 {10088 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10089 break;10090 }10091 }10092 #endif10093 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10094 {10095 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10096 uint64_t fCpu = pVCpu->fLocalForcedActions;10097 #endif10098 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310099 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10100 | VMCPU_FF_TLB_FLUSH10101 | VMCPU_FF_UNHALT );10102 10103 if (RT_LIKELY( ( !fCpu10104 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10105 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )10106 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))10107 {10108 if (--cMaxInstructionsGccStupidity > 0)10109 {10110 /* Poll timers every now an then according to the caller's specs. 
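
The poll-rate check in the IEMExecLots loop above works as a mask: cPollRate must be 2^n - 1 (hence the RT_IS_POWER_OF_TWO(cPollRate + 1) assertion), so that "countdown & cPollRate" hits zero once every 2^n instructions, and only then are the timers polled. A minimal standalone sketch with hypothetical names:

    #include <cassert>
    #include <cstdint>

    static bool shouldPollTimers(uint32_t cInstrCountdown, uint32_t cPollRateMask)
    {
        assert(((cPollRateMask + 1) & cPollRateMask) == 0);   /* mask must be 2^n - 1 */
        return (cInstrCountdown & cPollRateMask) == 0;
    }
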
*/10111 if ( (cMaxInstructionsGccStupidity & cPollRate) != 010112 || !TMTimerPollBool(pVM, pVCpu))10113 {10114 Assert(pVCpu->iem.s.cActiveMappings == 0);10115 iemReInitDecoder(pVCpu);10116 continue;10117 }10118 }10119 }10120 }10121 Assert(pVCpu->iem.s.cActiveMappings == 0);10122 }10123 else if (pVCpu->iem.s.cActiveMappings > 0)10124 iemMemRollback(pVCpu);10125 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10126 break;10127 }10128 }10129 #ifdef IEM_WITH_SETJMP10130 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10131 {10132 if (pVCpu->iem.s.cActiveMappings > 0)10133 iemMemRollback(pVCpu);10134 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10135 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10136 # endif10137 pVCpu->iem.s.cLongJumps++;10138 }10139 IEM_CATCH_LONGJMP_END(pVCpu);10140 #endif10141 10142 /*10143 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).10144 */10145 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10147 }10148 else10149 {10150 if (pVCpu->iem.s.cActiveMappings > 0)10151 iemMemRollback(pVCpu);10152 10153 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10154 /*10155 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10156 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10157 */10158 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10159 #endif10160 }10161 10162 /*10163 * Maybe re-enter raw-mode and log.10164 */10165 if (rcStrict != VINF_SUCCESS)10166 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",10167 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));10168 if (pcInstructions)10169 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;10170 return rcStrict;10171 }10172 10173 10174 /**10175 * Interface used by EMExecuteExec, does exit statistics and limits.10176 *10177 * @returns Strict VBox status code.10178 * @param pVCpu The cross context virtual CPU structure.10179 * @param fWillExit To be defined.10180 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.10181 * @param cMaxInstructions Maximum number of instructions to execute.10182 * @param cMaxInstructionsWithoutExits10183 * The max number of instructions without exits.10184 * @param pStats Where to return statistics.10185 */10186 VMM_INT_DECL(VBOXSTRICTRC)10187 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,10188 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)10189 {10190 NOREF(fWillExit); /** @todo define flexible exit crits */10191 10192 /*10193 * Initialize return stats.10194 */10195 pStats->cInstructions = 0;10196 pStats->cExits = 0;10197 pStats->cMaxExitDistance = 0;10198 pStats->cReserved = 0;10199 10200 /*10201 * Initial decoder init w/ prefetch, then setup setjmp.10202 */10203 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10204 if (rcStrict == VINF_SUCCESS)10205 {10206 #ifdef IEM_WITH_SETJMP10207 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? 
*/10208 IEM_TRY_SETJMP(pVCpu, rcStrict)10209 #endif10210 {10211 #ifdef IN_RING010212 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);10213 #endif10214 uint32_t cInstructionSinceLastExit = 0;10215 10216 /*10217 * The run loop. We limit ourselves to 4096 instructions right now.10218 */10219 PVM pVM = pVCpu->CTX_SUFF(pVM);10220 for (;;)10221 {10222 /*10223 * Log the state.10224 */10225 #ifdef LOG_ENABLED10226 iemLogCurInstr(pVCpu, true, "IEMExecForExits");10227 #endif10228 10229 /*10230 * Do the decoding and emulation.10231 */10232 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;10233 10234 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10235 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10236 10237 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits10238 && cInstructionSinceLastExit > 0 /* don't count the first */ )10239 {10240 pStats->cExits += 1;10241 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)10242 pStats->cMaxExitDistance = cInstructionSinceLastExit;10243 cInstructionSinceLastExit = 0;10244 }10245 10246 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10247 {10248 Assert(pVCpu->iem.s.cActiveMappings == 0);10249 pVCpu->iem.s.cInstructions++;10250 pStats->cInstructions++;10251 cInstructionSinceLastExit++;10252 10253 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10254 /* Perform any VMX nested-guest instruction boundary actions. */10255 uint64_t fCpu = pVCpu->fLocalForcedActions;10256 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10257 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10258 { /* likely */ }10259 else10260 {10261 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10263 fCpu = pVCpu->fLocalForcedActions;10264 else10265 {10266 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10267 break;10268 }10269 }10270 #endif10271 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10272 {10273 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10274 uint64_t fCpu = pVCpu->fLocalForcedActions;10275 #endif10276 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310277 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10278 | VMCPU_FF_TLB_FLUSH10279 | VMCPU_FF_UNHALT );10280 if (RT_LIKELY( ( ( !fCpu10281 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10282 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))10283 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )10284 || pStats->cInstructions < cMinInstructions))10285 {10286 if (pStats->cInstructions < cMaxInstructions)10287 {10288 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)10289 {10290 #ifdef IN_RING010291 if ( !fCheckPreemptionPending10292 || !RTThreadPreemptIsPending(NIL_RTTHREAD))10293 #endif10294 {10295 Assert(pVCpu->iem.s.cActiveMappings == 0);10296 iemReInitDecoder(pVCpu);10297 continue;10298 }10299 #ifdef IN_RING010300 rcStrict = VINF_EM_RAW_INTERRUPT;10301 break;10302 #endif10303 }10304 }10305 }10306 Assert(!(fCpu & VMCPU_FF_IEM));10307 }10308 Assert(pVCpu->iem.s.cActiveMappings == 0);10309 }10310 else if (pVCpu->iem.s.cActiveMappings > 0)10311 iemMemRollback(pVCpu);10312 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10313 break;10314 }10315 }10316 #ifdef IEM_WITH_SETJMP10317 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10318 {10319 if (pVCpu->iem.s.cActiveMappings > 0)10320 iemMemRollback(pVCpu);10321 pVCpu->iem.s.cLongJumps++;10322 }10323 IEM_CATCH_LONGJMP_END(pVCpu);10324 #endif10325 10326 /*10327 * Assert hidden register sanity (also done in 
iemInitDecoder and iemReInitDecoder).10328 */10329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10331 }10332 else10333 {10334 if (pVCpu->iem.s.cActiveMappings > 0)10335 iemMemRollback(pVCpu);10336 10337 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10338 /*10339 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10340 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10341 */10342 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10343 #endif10344 }10345 10346 /*10347 * Maybe re-enter raw-mode and log.10348 */10349 if (rcStrict != VINF_SUCCESS)10350 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",10351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,10352 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));10353 return rcStrict;10354 }10355 10356 10357 /**10358 * Injects a trap, fault, abort, software interrupt or external interrupt.10359 *10360 * The parameter list matches TRPMQueryTrapAll pretty closely.10361 *10362 * @returns Strict VBox status code.10363 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10364 * @param u8TrapNo The trap number.10365 * @param enmType What type is it (trap/fault/abort), software10366 * interrupt or hardware interrupt.10367 * @param uErrCode The error code if applicable.10368 * @param uCr2 The CR2 value if applicable.10369 * @param cbInstr The instruction length (only relevant for10370 * software interrupts).10371 */10372 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,10373 uint8_t cbInstr)10374 {10375 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */10376 #ifdef DBGFTRACE_ENABLED10377 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",10378 u8TrapNo, enmType, uErrCode, uCr2);10379 #endif10380 10381 uint32_t fFlags;10382 switch (enmType)10383 {10384 case TRPM_HARDWARE_INT:10385 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));10386 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;10387 uErrCode = uCr2 = 0;10388 break;10389 10390 case TRPM_SOFTWARE_INT:10391 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));10392 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;10393 uErrCode = uCr2 = 0;10394 break;10395 10396 case TRPM_TRAP:10397 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. 
*/10398 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));10399 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;10400 if (u8TrapNo == X86_XCPT_PF)10401 fFlags |= IEM_XCPT_FLAGS_CR2;10402 switch (u8TrapNo)10403 {10404 case X86_XCPT_DF:10405 case X86_XCPT_TS:10406 case X86_XCPT_NP:10407 case X86_XCPT_SS:10408 case X86_XCPT_PF:10409 case X86_XCPT_AC:10410 case X86_XCPT_GP:10411 fFlags |= IEM_XCPT_FLAGS_ERR;10412 break;10413 }10414 break;10415 10416 IEM_NOT_REACHED_DEFAULT_CASE_RET();10417 }10418 10419 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);10420 10421 if (pVCpu->iem.s.cActiveMappings > 0)10422 iemMemRollback(pVCpu);10423 10424 return rcStrict;10425 }10426 10427 10428 /**10429 * Injects the active TRPM event.10430 *10431 * @returns Strict VBox status code.10432 * @param pVCpu The cross context virtual CPU structure.10433 */10434 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)10435 {10436 #ifndef IEM_IMPLEMENTS_TASKSWITCH10437 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));10438 #else10439 uint8_t u8TrapNo;10440 TRPMEVENT enmType;10441 uint32_t uErrCode;10442 RTGCUINTPTR uCr2;10443 uint8_t cbInstr;10444 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);10445 if (RT_FAILURE(rc))10446 return rc;10447 10448 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle10449 * ICEBP \#DB injection as a special case. */10450 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);10451 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM10452 if (rcStrict == VINF_SVM_VMEXIT)10453 rcStrict = VINF_SUCCESS;10454 #endif10455 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10456 if (rcStrict == VINF_VMX_VMEXIT)10457 rcStrict = VINF_SUCCESS;10458 #endif10459 /** @todo Are there any other codes that imply the event was successfully10460 * delivered to the guest? See @bugref{6607}. */10461 if ( rcStrict == VINF_SUCCESS10462 || rcStrict == VINF_IEM_RAISED_XCPT)10463 TRPMResetTrap(pVCpu);10464 10465 return rcStrict;10466 #endif10467 }10468 10469 10470 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)10471 {10472 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10473 return VERR_NOT_IMPLEMENTED;10474 }10475 10476 10477 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)10478 {10479 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10480 return VERR_NOT_IMPLEMENTED;10481 }10482 10483 10484 /**10485 * Interface for HM and EM for executing string I/O OUT (write) instructions.10486 *10487 * This API ASSUMES that the caller has already verified that the guest code is10488 * allowed to access the I/O port. (The I/O port is in the DX register in the10489 * guest state.)10490 *10491 * @returns Strict VBox status code.10492 * @param pVCpu The cross context virtual CPU structure.10493 * @param cbValue The size of the I/O port access (1, 2, or 4).10494 * @param enmAddrMode The addressing mode.10495 * @param fRepPrefix Indicates whether a repeat prefix is used10496 * (doesn't matter which for this instruction).10497 * @param cbInstr The instruction length in bytes.10498 * @param iEffSeg The effective segment address.10499 * @param fIoChecked Whether the access to the I/O port has been10500 * checked or not. 
It's typically checked in the10501 * HM scenario.10502 */10503 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10504 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)10505 {10506 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);10507 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10508 10509 /*10510 * State init.10511 */10512 iemInitExec(pVCpu, 0 /*fExecOpts*/);10513 10514 /*10515 * Switch orgy for getting to the right handler.10516 */10517 VBOXSTRICTRC rcStrict;10518 if (fRepPrefix)10519 {10520 switch (enmAddrMode)10521 {10522 case IEMMODE_16BIT:10523 switch (cbValue)10524 {10525 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10526 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10527 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10528 default:10529 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10530 }10531 break;10532 10533 case IEMMODE_32BIT:10534 switch (cbValue)10535 {10536 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10537 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10538 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10539 default:10540 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10541 }10542 break;10543 10544 case IEMMODE_64BIT:10545 switch (cbValue)10546 {10547 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10548 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10549 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10550 default:10551 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10552 }10553 break;10554 10555 default:10556 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10557 }10558 }10559 else10560 {10561 switch (enmAddrMode)10562 {10563 case IEMMODE_16BIT:10564 switch (cbValue)10565 {10566 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10567 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10568 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10569 default:10570 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10571 }10572 break;10573 10574 case IEMMODE_32BIT:10575 switch (cbValue)10576 {10577 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10578 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10579 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10580 default:10581 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10582 }10583 break;10584 10585 case IEMMODE_64BIT:10586 switch (cbValue)10587 {10588 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10589 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10590 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10591 default:10592 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10593 }10594 break;10595 10596 default:10597 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10598 }10599 }10600 10601 if (pVCpu->iem.s.cActiveMappings)10602 iemMemRollback(pVCpu);10603 10604 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10605 }10606 10607 10608 /**10609 * Interface for HM and EM for executing string I/O IN (read) instructions.10610 *10611 * This API ASSUMES that the caller has already verified that the guest code is10612 * allowed to access the I/O port. (The I/O port is in the DX register in the10613 * guest state.)10614 *10615 * @returns Strict VBox status code.10616 * @param pVCpu The cross context virtual CPU structure.10617 * @param cbValue The size of the I/O port access (1, 2, or 4).10618 * @param enmAddrMode The addressing mode.10619 * @param fRepPrefix Indicates whether a repeat prefix is used10620 * (doesn't matter which for this instruction).10621 * @param cbInstr The instruction length in bytes.10622 * @param fIoChecked Whether the access to the I/O port has been10623 * checked or not. It's typically checked in the10624 * HM scenario.10625 */10626 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10627 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)10628 {10629 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10630 10631 /*10632 * State init.10633 */10634 iemInitExec(pVCpu, 0 /*fExecOpts*/);10635 10636 /*10637 * Switch orgy for getting to the right handler.10638 */10639 VBOXSTRICTRC rcStrict;10640 if (fRepPrefix)10641 {10642 switch (enmAddrMode)10643 {10644 case IEMMODE_16BIT:10645 switch (cbValue)10646 {10647 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10648 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10649 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10650 default:10651 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10652 }10653 break;10654 10655 case IEMMODE_32BIT:10656 switch (cbValue)10657 {10658 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10659 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10660 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10661 default:10662 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10663 }10664 break;10665 10666 case IEMMODE_64BIT:10667 switch (cbValue)10668 {10669 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10670 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10671 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10672 default:10673 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10674 }10675 break;10676 10677 default:10678 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10679 }10680 }10681 else10682 {10683 switch (enmAddrMode)10684 {10685 case IEMMODE_16BIT:10686 switch (cbValue)10687 {10688 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10689 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10690 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10691 default:10692 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10693 }10694 break;10695 10696 case IEMMODE_32BIT:10697 switch (cbValue)10698 {10699 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10700 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10701 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10702 default:10703 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10704 }10705 break;10706 10707 case IEMMODE_64BIT:10708 switch (cbValue)10709 {10710 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10711 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10712 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10713 default:10714 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10715 }10716 break;10717 10718 default:10719 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10720 }10721 }10722 10723 if ( pVCpu->iem.s.cActiveMappings == 010724 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))10725 { /* likely */ }10726 else10727 {10728 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));10729 iemMemRollback(pVCpu);10730 }10731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10732 }10733 10734 10735 /**10736 * Interface for rawmode to write execute an OUT instruction.10737 *10738 * @returns Strict VBox status code.10739 * @param pVCpu The cross context virtual CPU structure.10740 * @param cbInstr The instruction length in bytes.10741 * @param u16Port The port to read.10742 * @param fImm Whether the port is specified using an immediate operand or10743 * using the implicit DX register.10744 * @param cbReg The register size.10745 *10746 * @remarks In ring-0 not all of the state needs to be synced in.10747 */10748 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10749 {10750 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10751 Assert(cbReg <= 4 && cbReg != 3);10752 10753 iemInitExec(pVCpu, 0 /*fExecOpts*/);10754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,10755 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10756 Assert(!pVCpu->iem.s.cActiveMappings);10757 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10758 }10759 10760 10761 /**10762 * Interface for rawmode to write execute an IN instruction.10763 *10764 * @returns Strict VBox status code.10765 * @param pVCpu The cross context virtual CPU structure.10766 * @param cbInstr The instruction length in bytes.10767 * @param u16Port The port to read.10768 * @param fImm Whether the port is specified using an immediate operand or10769 * using the implicit DX.10770 * @param cbReg The register size.10771 */10772 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10773 {10774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10775 Assert(cbReg <= 4 && cbReg != 3);10776 10777 iemInitExec(pVCpu, 0 /*fExecOpts*/);10778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,10779 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10780 Assert(!pVCpu->iem.s.cActiveMappings);10781 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10782 }10783 10784 10785 /**10786 * Interface for HM and EM to 
write to a CRx register.10787 *10788 * @returns Strict VBox status code.10789 * @param pVCpu The cross context virtual CPU structure.10790 * @param cbInstr The instruction length in bytes.10791 * @param iCrReg The control register number (destination).10792 * @param iGReg The general purpose register number (source).10793 *10794 * @remarks In ring-0 not all of the state needs to be synced in.10795 */10796 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)10797 {10798 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10799 Assert(iCrReg < 16);10800 Assert(iGReg < 16);10801 10802 iemInitExec(pVCpu, 0 /*fExecOpts*/);10803 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);10804 Assert(!pVCpu->iem.s.cActiveMappings);10805 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10806 }10807 10808 10809 /**10810 * Interface for HM and EM to read from a CRx register.10811 *10812 * @returns Strict VBox status code.10813 * @param pVCpu The cross context virtual CPU structure.10814 * @param cbInstr The instruction length in bytes.10815 * @param iGReg The general purpose register number (destination).10816 * @param iCrReg The control register number (source).10817 *10818 * @remarks In ring-0 not all of the state needs to be synced in.10819 */10820 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)10821 {10822 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10823 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR410824 | CPUMCTX_EXTRN_APIC_TPR);10825 Assert(iCrReg < 16);10826 Assert(iGReg < 16);10827 10828 iemInitExec(pVCpu, 0 /*fExecOpts*/);10829 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);10830 Assert(!pVCpu->iem.s.cActiveMappings);10831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10832 }10833 10834 10835 /**10836 * Interface for HM and EM to write to a DRx register.10837 *10838 * @returns Strict VBox status code.10839 * @param pVCpu The cross context virtual CPU structure.10840 * @param cbInstr The instruction length in bytes.10841 * @param iDrReg The debug register number (destination).10842 * @param iGReg The general purpose register number (source).10843 *10844 * @remarks In ring-0 not all of the state needs to be synced in.10845 */10846 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)10847 {10848 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10849 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10850 Assert(iDrReg < 8);10851 Assert(iGReg < 16);10852 10853 iemInitExec(pVCpu, 0 /*fExecOpts*/);10854 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);10855 Assert(!pVCpu->iem.s.cActiveMappings);10856 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10857 }10858 10859 10860 /**10861 * Interface for HM and EM to read from a DRx register.10862 *10863 * @returns Strict VBox status code.10864 * @param pVCpu The cross context virtual CPU structure.10865 * @param cbInstr The instruction length in bytes.10866 * @param iGReg The general purpose register number (destination).10867 * @param iDrReg The debug register number (source).10868 *10869 * @remarks In ring-0 not all of the state needs to be synced in.10870 */10871 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t 
cbInstr, uint8_t iGReg, uint8_t iDrReg)10872 {10873 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10874 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10875 Assert(iDrReg < 8);10876 Assert(iGReg < 16);10877 10878 iemInitExec(pVCpu, 0 /*fExecOpts*/);10879 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);10880 Assert(!pVCpu->iem.s.cActiveMappings);10881 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10882 }10883 10884 10885 /**10886 * Interface for HM and EM to clear the CR0[TS] bit.10887 *10888 * @returns Strict VBox status code.10889 * @param pVCpu The cross context virtual CPU structure.10890 * @param cbInstr The instruction length in bytes.10891 *10892 * @remarks In ring-0 not all of the state needs to be synced in.10893 */10894 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)10895 {10896 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10897 10898 iemInitExec(pVCpu, 0 /*fExecOpts*/);10899 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);10900 Assert(!pVCpu->iem.s.cActiveMappings);10901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10902 }10903 10904 10905 /**10906 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).10907 *10908 * @returns Strict VBox status code.10909 * @param pVCpu The cross context virtual CPU structure.10910 * @param cbInstr The instruction length in bytes.10911 * @param uValue The value to load into CR0.10912 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a10913 * memory operand. Otherwise pass NIL_RTGCPTR.10914 *10915 * @remarks In ring-0 not all of the state needs to be synced in.10916 */10917 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)10918 {10919 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10920 10921 iemInitExec(pVCpu, 0 /*fExecOpts*/);10922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);10923 Assert(!pVCpu->iem.s.cActiveMappings);10924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10925 }10926 10927 10928 /**10929 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).10930 *10931 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.10932 *10933 * @returns Strict VBox status code.10934 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10935 * @param cbInstr The instruction length in bytes.10936 * @remarks In ring-0 not all of the state needs to be synced in.10937 * @thread EMT(pVCpu)10938 */10939 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)10940 {10941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10942 10943 iemInitExec(pVCpu, 0 /*fExecOpts*/);10944 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);10945 Assert(!pVCpu->iem.s.cActiveMappings);10946 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10947 }10948 10949 10950 /**10951 * Interface for HM and EM to emulate the WBINVD instruction.10952 *10953 * @returns Strict VBox status code.10954 * @param pVCpu The cross context virtual CPU structure.10955 * @param cbInstr The instruction length in bytes.10956 *10957 * @remarks In ring-0 not all of the state needs to be synced in.10958 */10959 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)10960 {10961 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10962 10963 iemInitExec(pVCpu, 0 /*fExecOpts*/);10964 
VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);10965 Assert(!pVCpu->iem.s.cActiveMappings);10966 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10967 }10968 10969 10970 /**10971 * Interface for HM and EM to emulate the INVD instruction.10972 *10973 * @returns Strict VBox status code.10974 * @param pVCpu The cross context virtual CPU structure.10975 * @param cbInstr The instruction length in bytes.10976 *10977 * @remarks In ring-0 not all of the state needs to be synced in.10978 */10979 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)10980 {10981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10982 10983 iemInitExec(pVCpu, 0 /*fExecOpts*/);10984 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);10985 Assert(!pVCpu->iem.s.cActiveMappings);10986 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10987 }10988 10989 10990 /**10991 * Interface for HM and EM to emulate the INVLPG instruction.10992 *10993 * @returns Strict VBox status code.10994 * @retval VINF_PGM_SYNC_CR310995 *10996 * @param pVCpu The cross context virtual CPU structure.10997 * @param cbInstr The instruction length in bytes.10998 * @param GCPtrPage The effective address of the page to invalidate.10999 *11000 * @remarks In ring-0 not all of the state needs to be synced in.11001 */11002 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)11003 {11004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11005 11006 iemInitExec(pVCpu, 0 /*fExecOpts*/);11007 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);11008 Assert(!pVCpu->iem.s.cActiveMappings);11009 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11010 }11011 11012 11013 /**11014 * Interface for HM and EM to emulate the INVPCID instruction.11015 *11016 * @returns Strict VBox status code.11017 * @retval VINF_PGM_SYNC_CR311018 *11019 * @param pVCpu The cross context virtual CPU structure.11020 * @param cbInstr The instruction length in bytes.11021 * @param iEffSeg The effective segment register.11022 * @param GCPtrDesc The effective address of the INVPCID descriptor.11023 * @param uType The invalidation type.11024 *11025 * @remarks In ring-0 not all of the state needs to be synced in.11026 */11027 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,11028 uint64_t uType)11029 {11030 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);11031 11032 iemInitExec(pVCpu, 0 /*fExecOpts*/);11033 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);11034 Assert(!pVCpu->iem.s.cActiveMappings);11035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11036 }11037 11038 11039 /**11040 * Interface for HM and EM to emulate the CPUID instruction.11041 *11042 * @returns Strict VBox status code.11043 *11044 * @param pVCpu The cross context virtual CPU structure.11045 * @param cbInstr The instruction length in bytes.11046 *11047 * @remarks Not all of the state needs to be synced in, the usual pluss RAX and RCX.11048 */11049 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)11050 {11051 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);11053 11054 iemInitExec(pVCpu, 0 /*fExecOpts*/);11055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);11056 Assert(!pVCpu->iem.s.cActiveMappings);11057 return 
iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11058 }11059 11060 11061 /**11062 * Interface for HM and EM to emulate the RDPMC instruction.11063 *11064 * @returns Strict VBox status code.11065 *11066 * @param pVCpu The cross context virtual CPU structure.11067 * @param cbInstr The instruction length in bytes.11068 *11069 * @remarks Not all of the state needs to be synced in.11070 */11071 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)11072 {11073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);11075 11076 iemInitExec(pVCpu, 0 /*fExecOpts*/);11077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);11078 Assert(!pVCpu->iem.s.cActiveMappings);11079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11080 }11081 11082 11083 /**11084 * Interface for HM and EM to emulate the RDTSC instruction.11085 *11086 * @returns Strict VBox status code.11087 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11088 *11089 * @param pVCpu The cross context virtual CPU structure.11090 * @param cbInstr The instruction length in bytes.11091 *11092 * @remarks Not all of the state needs to be synced in.11093 */11094 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)11095 {11096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11097 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);11098 11099 iemInitExec(pVCpu, 0 /*fExecOpts*/);11100 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);11101 Assert(!pVCpu->iem.s.cActiveMappings);11102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11103 }11104 11105 11106 /**11107 * Interface for HM and EM to emulate the RDTSCP instruction.11108 *11109 * @returns Strict VBox status code.11110 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11111 *11112 * @param pVCpu The cross context virtual CPU structure.11113 * @param cbInstr The instruction length in bytes.11114 *11115 * @remarks Not all of the state needs to be synced in. Recommended11116 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.11117 */11118 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)11119 {11120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);11122 11123 iemInitExec(pVCpu, 0 /*fExecOpts*/);11124 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);11125 Assert(!pVCpu->iem.s.cActiveMappings);11126 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11127 }11128 11129 11130 /**11131 * Interface for HM and EM to emulate the RDMSR instruction.11132 *11133 * @returns Strict VBox status code.11134 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11135 *11136 * @param pVCpu The cross context virtual CPU structure.11137 * @param cbInstr The instruction length in bytes.11138 *11139 * @remarks Not all of the state needs to be synced in. 
Requires RCX and11140 * (currently) all MSRs.11141 */11142 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)11143 {11144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11145 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);11146 11147 iemInitExec(pVCpu, 0 /*fExecOpts*/);11148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);11149 Assert(!pVCpu->iem.s.cActiveMappings);11150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11151 }11152 11153 11154 /**11155 * Interface for HM and EM to emulate the WRMSR instruction.11156 *11157 * @returns Strict VBox status code.11158 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11159 *11160 * @param pVCpu The cross context virtual CPU structure.11161 * @param cbInstr The instruction length in bytes.11162 *11163 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,11164 * and (currently) all MSRs.11165 */11166 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)11167 {11168 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11169 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK11170 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);11171 11172 iemInitExec(pVCpu, 0 /*fExecOpts*/);11173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);11174 Assert(!pVCpu->iem.s.cActiveMappings);11175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11176 }11177 11178 11179 /**11180 * Interface for HM and EM to emulate the MONITOR instruction.11181 *11182 * @returns Strict VBox status code.11183 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11184 *11185 * @param pVCpu The cross context virtual CPU structure.11186 * @param cbInstr The instruction length in bytes.11187 *11188 * @remarks Not all of the state needs to be synced in.11189 * @remarks ASSUMES the default segment of DS and no segment override prefixes11190 * are used.11191 */11192 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)11193 {11194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11195 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);11196 11197 iemInitExec(pVCpu, 0 /*fExecOpts*/);11198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);11199 Assert(!pVCpu->iem.s.cActiveMappings);11200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11201 }11202 11203 11204 /**11205 * Interface for HM and EM to emulate the MWAIT instruction.11206 *11207 * @returns Strict VBox status code.11208 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11209 *11210 * @param pVCpu The cross context virtual CPU structure.11211 * @param cbInstr The instruction length in bytes.11212 *11213 * @remarks Not all of the state needs to be synced in.11214 */11215 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)11216 {11217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11218 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);11219 11220 iemInitExec(pVCpu, 0 /*fExecOpts*/);11221 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);11222 Assert(!pVCpu->iem.s.cActiveMappings);11223 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11224 }11225 11226 11227 /**11228 * Interface for HM and EM to emulate the HLT instruction.11229 
*11230 * @returns Strict VBox status code.11231 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.11232 *11233 * @param pVCpu The cross context virtual CPU structure.11234 * @param cbInstr The instruction length in bytes.11235 *11236 * @remarks Not all of the state needs to be synced in.11237 */11238 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)11239 {11240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);11241 11242 iemInitExec(pVCpu, 0 /*fExecOpts*/);11243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);11244 Assert(!pVCpu->iem.s.cActiveMappings);11245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11246 }11247 11248 11249 /**11250 * Checks if IEM is in the process of delivering an event (interrupt or11251 * exception).11252 *11253 * @returns true if we're in the process of raising an interrupt or exception,11254 * false otherwise.11255 * @param pVCpu The cross context virtual CPU structure.11256 * @param puVector Where to store the vector associated with the11257 * currently delivered event, optional.11258 * @param pfFlags Where to store th event delivery flags (see11259 * IEM_XCPT_FLAGS_XXX), optional.11260 * @param puErr Where to store the error code associated with the11261 * event, optional.11262 * @param puCr2 Where to store the CR2 associated with the event,11263 * optional.11264 * @remarks The caller should check the flags to determine if the error code and11265 * CR2 are valid for the event.11266 */11267 VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)11268 {11269 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;11270 if (fRaisingXcpt)11271 {11272 if (puVector)11273 *puVector = pVCpu->iem.s.uCurXcpt;11274 if (pfFlags)11275 *pfFlags = pVCpu->iem.s.fCurXcpt;11276 if (puErr)11277 *puErr = pVCpu->iem.s.uCurXcptErr;11278 if (puCr2)11279 *puCr2 = pVCpu->iem.s.uCurXcptCr2;11280 }11281 return fRaisingXcpt;11282 }11283 11284 #ifdef IN_RING311285 11286 /**11287 * Handles the unlikely and probably fatal merge cases.11288 *11289 * @returns Merged status code.11290 * @param rcStrict Current EM status code.11291 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge11292 * with @a rcStrict.11293 * @param iMemMap The memory mapping index. 
For error reporting only.11294 * @param pVCpu The cross context virtual CPU structure of the calling11295 * thread, for error reporting only.11296 */11297 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,11298 unsigned iMemMap, PVMCPUCC pVCpu)11299 {11300 if (RT_FAILURE_NP(rcStrict))11301 return rcStrict;11302 11303 if (RT_FAILURE_NP(rcStrictCommit))11304 return rcStrictCommit;11305 11306 if (rcStrict == rcStrictCommit)11307 return rcStrictCommit;11308 11309 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",11310 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,11311 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,11312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,11313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));11314 return VERR_IOM_FF_STATUS_IPE;11315 }11316 11317 11318 /**11319 * Helper for IOMR3ProcessForceFlag.11320 *11321 * @returns Merged status code.11322 * @param rcStrict Current EM status code.11323 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge11324 * with @a rcStrict.11325 * @param iMemMap The memory mapping index. For error reporting only.11326 * @param pVCpu The cross context virtual CPU structure of the calling11327 * thread, for error reporting only.11328 */11329 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)11330 {11331 /* Simple. */11332 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))11333 return rcStrictCommit;11334 11335 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))11336 return rcStrict;11337 11338 /* EM scheduling status codes. */11339 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST11340 && rcStrict <= VINF_EM_LAST))11341 {11342 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST11343 && rcStrictCommit <= VINF_EM_LAST))11344 return rcStrict < rcStrictCommit ? 
rcStrict : rcStrictCommit;
11345 }
11346
11347 /* Unlikely */
11348 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11349 }
11350
11351
11352 /**
11353 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11354 *
11355 * @returns Merge between @a rcStrict and what the commit operation returned.
11356 * @param pVM The cross context VM structure.
11357 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11358 * @param rcStrict The status code returned by ring-0 or raw-mode.
11359 */
11360 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11361 {
11362 /*
11363 * Reset the pending commit.
11364 */
11365 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11366 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11367 ("%#x %#x %#x\n",
11368 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11369 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11370
11371 /*
11372 * Commit the pending bounce buffers (usually just one).
11373 */
11374 unsigned cBufs = 0;
11375 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11376 while (iMemMap-- > 0)
11377 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11378 {
11379 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11380 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11381 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11382
11383 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11384 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11385 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11386
11387 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11388 {
11389 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11391 pbBuf,
11392 cbFirst,
11393 PGMACCESSORIGIN_IEM);
11394 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11395 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11396 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11397 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11398 }
11399
11400 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11401 {
11402 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11404 pbBuf + cbFirst,
11405 cbSecond,
11406 PGMACCESSORIGIN_IEM);
11407 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11408 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11409 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11410 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11411 }
11412 cBufs++;
11413 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11414 }
11415
11416 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11417 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11418 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11419 pVCpu->iem.s.cActiveMappings = 0;
11420 return rcStrict;
11421 }
11422
11423 #endif /* IN_RING3 */
11424
-
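For readers tracing the status-code plumbing in IEMR3ProcessForceFlag above: iemR3MergeStatus folds the commit status returned by PGMPhysWrite into the status the EM loop is already carrying, letting failures through, then keeping the more urgent of two EM scheduling codes, and punting anything odd to iemR3MergeStatusSlow. The standalone C++ sketch below mirrors that ordering; the numeric constants are made-up stand-ins rather than VirtualBox's real VINF_*/VERR_* values, and VBOXSTRICTRC is simplified to a plain int with negative meaning failure.

// Standalone sketch of the merge rule implemented by iemR3MergeStatus above.
// All constants are assumed stand-ins, not VirtualBox's real status codes.
#include <cstdio>

static const int kSuccess   = 0;    // stand-in for VINF_SUCCESS
static const int kEmRawToR3 = 80;   // stand-in for VINF_EM_RAW_TO_R3 (assumed value)
static const int kEmFirst   = 100;  // stand-in for VINF_EM_FIRST
static const int kEmLast    = 199;  // stand-in for VINF_EM_LAST
static const int kIpeError  = -225; // stand-in for VERR_IOM_FF_STATUS_IPE

static int mergeStatus(int rcCurrent, int rcCommit)
{
    // Simple cases first: one side has nothing interesting, take the other.
    if (rcCurrent == kSuccess || rcCurrent == kEmRawToR3)
        return rcCommit;
    if (rcCommit == kSuccess)
        return rcCurrent;
    // Two EM scheduling requests: keep the smaller (more urgent) one.
    if (   rcCurrent >= kEmFirst && rcCurrent <= kEmLast
        && rcCommit  >= kEmFirst && rcCommit  <= kEmLast)
        return rcCurrent < rcCommit ? rcCurrent : rcCommit;
    // Unlikely tail, mirroring iemR3MergeStatusSlow: failures propagate,
    // identical codes are kept, anything else is an internal error.
    if (rcCurrent < 0)
        return rcCurrent;
    if (rcCommit < 0)
        return rcCommit;
    return rcCurrent == rcCommit ? rcCommit : kIpeError;
}

int main()
{
    std::printf("merge(success, 150) = %d\n", mergeStatus(kSuccess, 150)); // 150
    std::printf("merge(120, 150)     = %d\n", mergeStatus(120, 150));      // 120, the more urgent request
    std::printf("merge(150, -5)      = %d\n", mergeStatus(150, -5));       // -5, failures win
    return 0;
}

Keeping the smaller of two in-range EM codes follows the VBox convention that lower VINF_EM_* status values take precedence, so an urgent scheduling request is not lost just because a bounce-buffer commit also reported something.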
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp
r108195 r108220 1 1 /* $Id$ */
2 2 /** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
3 * IEM - Interpreted Execution Manager - x86 target, exceptions & interrupts.
4 4 */
5 5
… …
25 25 * SPDX-License-Identifier: GPL-3.0-only
26 26 */
27
28
29 /** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted exeuction manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead the IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging.
The different72 * logging levels/flags are generally used for the following purposes:73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.74 * - Flow (LogFlow) : Basic enter/exit IEM state info.75 * - Level 2 (Log2) : ?76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.78 * - Level 5 (Log5) : Decoding details.79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.80 * - Level 7 (Log7) : iret++ execution logging.81 * - Level 8 (Log8) :82 * - Level 9 (Log9) :83 * - Level 10 (Log10): TLBs.84 * - Level 11 (Log11): Unmasked FPU exceptions.85 *86 * The \"IEM_MEM\" log group covers most of memory related details logging,87 * except for errors and exceptions:88 * - Level 1 (Log) : Reads.89 * - Level 2 (Log2) : Read fallbacks.90 * - Level 3 (Log3) : MemMap read.91 * - Level 4 (Log4) : MemMap read fallbacks.92 * - Level 5 (Log5) : Writes93 * - Level 6 (Log6) : Write fallbacks.94 * - Level 7 (Log7) : MemMap writes and read-writes.95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.96 * - Level 9 (Log9) : Stack reads.97 * - Level 10 (Log10): Stack read fallbacks.98 * - Level 11 (Log11): Stack writes.99 * - Level 12 (Log12): Stack write fallbacks.100 * - Flow (LogFlow) :101 *102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:103 * - Level 1 (Log) : Errors and other major events.104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)105 * - Level 2 (Log2) : VM exits.106 *107 * The syscall logging level assignments:108 * - Level 1: DOS and BIOS.109 * - Level 2: Windows 3.x110 * - Level 3: Linux.111 */112 113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */114 #ifdef _MSC_VER115 # pragma warning(disable:4505)116 #endif117 27 118 28 … … 191 101 192 102 193 /*********************************************************************************************************************************194 * Internal Functions *195 *********************************************************************************************************************************/196 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,197 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;198 199 200 /**201 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code202 * path.203 *204 * This will also invalidate TLB entries for any pages with active data205 * breakpoints on them.206 *207 * @returns IEM_F_BRK_PENDING_XXX or zero.208 * @param pVCpu The cross context virtual CPU structure of the209 * calling thread.210 *211 * @note Don't call directly, use iemCalcExecDbgFlags instead.212 */213 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)214 {215 uint32_t fExec = 0;216 217 /*218 * Helper for invalidate the data TLB for breakpoint addresses.219 *220 * This is to make sure any access to the page will always trigger a TLB221 * load for as long as the breakpoint is enabled.222 */223 #ifdef IEM_WITH_DATA_TLB224 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \225 RTGCPTR uTagNoRev = (a_uValue); \226 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \227 /** @todo do large page accounting */ \228 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \229 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \230 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \231 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | 
pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \232 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \233 } while (0)234 #else235 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)236 #endif237 238 /*239 * Process guest breakpoints.240 */241 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \242 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \243 { \244 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \245 { \246 case X86_DR7_RW_EO: \247 fExec |= IEM_F_PENDING_BRK_INSTR; \248 break; \249 case X86_DR7_RW_WO: \250 case X86_DR7_RW_RW: \251 fExec |= IEM_F_PENDING_BRK_DATA; \252 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \253 break; \254 case X86_DR7_RW_IO: \255 fExec |= IEM_F_PENDING_BRK_X86_IO; \256 break; \257 } \258 } \259 } while (0)260 261 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];262 if (fGstDr7 & X86_DR7_ENABLED_MASK)263 {264 /** @todo extract more details here to simplify matching later. */265 #ifdef IEM_WITH_DATA_TLB266 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);267 #endif268 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);269 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);270 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);271 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);272 }273 274 /*275 * Process hypervisor breakpoints.276 */277 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);278 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);279 if (fHyperDr7 & X86_DR7_ENABLED_MASK)280 {281 /** @todo extract more details here to simplify matching later. */282 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));283 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));284 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));285 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));286 }287 288 return fExec;289 }290 291 292 /**293 * Initializes the decoder state.294 *295 * iemReInitDecoder is mostly a copy of this function.296 *297 * @param pVCpu The cross context virtual CPU structure of the298 * calling thread.299 * @param fExecOpts Optional execution flags:300 * - IEM_F_BYPASS_HANDLERS301 * - IEM_F_X86_DISREGARD_LOCK302 */303 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)304 {305 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);306 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));315 316 /* Execution state: */317 uint32_t fExec;318 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;319 320 /* Decoder state: */321 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */322 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;323 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)324 {325 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... 
*/326 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;327 }328 else329 {330 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;331 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;332 }333 pVCpu->iem.s.fPrefixes = 0;334 pVCpu->iem.s.uRexReg = 0;335 pVCpu->iem.s.uRexB = 0;336 pVCpu->iem.s.uRexIndex = 0;337 pVCpu->iem.s.idxPrefix = 0;338 pVCpu->iem.s.uVex3rdReg = 0;339 pVCpu->iem.s.uVexLength = 0;340 pVCpu->iem.s.fEvexStuff = 0;341 pVCpu->iem.s.iEffSeg = X86_SREG_DS;342 #ifdef IEM_WITH_CODE_TLB343 pVCpu->iem.s.pbInstrBuf = NULL;344 pVCpu->iem.s.offInstrNextByte = 0;345 pVCpu->iem.s.offCurInstrStart = 0;346 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF347 pVCpu->iem.s.offOpcode = 0;348 # endif349 # ifdef VBOX_STRICT350 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;351 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;352 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;353 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);354 # endif355 #else356 pVCpu->iem.s.offOpcode = 0;357 pVCpu->iem.s.cbOpcode = 0;358 #endif359 pVCpu->iem.s.offModRm = 0;360 pVCpu->iem.s.cActiveMappings = 0;361 pVCpu->iem.s.iNextMapping = 0;362 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;363 364 #ifdef DBGFTRACE_ENABLED365 switch (IEM_GET_CPU_MODE(pVCpu))366 {367 case IEMMODE_64BIT:368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);369 break;370 case IEMMODE_32BIT:371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);372 break;373 case IEMMODE_16BIT:374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);375 break;376 }377 #endif378 }379 380 381 /**382 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.383 *384 * This is mostly a copy of iemInitDecoder.385 *386 * @param pVCpu The cross context virtual CPU structure of the calling EMT.387 */388 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)389 {390 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));399 400 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */401 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),402 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));403 404 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);405 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */406 pVCpu->iem.s.enmEffAddrMode = enmMode;407 if (enmMode != IEMMODE_64BIT)408 {409 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... 
*/410 pVCpu->iem.s.enmEffOpSize = enmMode;411 }412 else413 {414 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;415 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;416 }417 pVCpu->iem.s.fPrefixes = 0;418 pVCpu->iem.s.uRexReg = 0;419 pVCpu->iem.s.uRexB = 0;420 pVCpu->iem.s.uRexIndex = 0;421 pVCpu->iem.s.idxPrefix = 0;422 pVCpu->iem.s.uVex3rdReg = 0;423 pVCpu->iem.s.uVexLength = 0;424 pVCpu->iem.s.fEvexStuff = 0;425 pVCpu->iem.s.iEffSeg = X86_SREG_DS;426 #ifdef IEM_WITH_CODE_TLB427 if (pVCpu->iem.s.pbInstrBuf)428 {429 uint64_t off = (enmMode == IEMMODE_64BIT430 ? pVCpu->cpum.GstCtx.rip431 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)432 - pVCpu->iem.s.uInstrBufPc;433 if (off < pVCpu->iem.s.cbInstrBufTotal)434 {435 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;436 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;437 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)438 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;439 else440 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;441 }442 else443 {444 pVCpu->iem.s.pbInstrBuf = NULL;445 pVCpu->iem.s.offInstrNextByte = 0;446 pVCpu->iem.s.offCurInstrStart = 0;447 pVCpu->iem.s.cbInstrBuf = 0;448 pVCpu->iem.s.cbInstrBufTotal = 0;449 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;450 }451 }452 else453 {454 pVCpu->iem.s.offInstrNextByte = 0;455 pVCpu->iem.s.offCurInstrStart = 0;456 pVCpu->iem.s.cbInstrBuf = 0;457 pVCpu->iem.s.cbInstrBufTotal = 0;458 # ifdef VBOX_STRICT459 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;460 # endif461 }462 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF463 pVCpu->iem.s.offOpcode = 0;464 # endif465 #else /* !IEM_WITH_CODE_TLB */466 pVCpu->iem.s.cbOpcode = 0;467 pVCpu->iem.s.offOpcode = 0;468 #endif /* !IEM_WITH_CODE_TLB */469 pVCpu->iem.s.offModRm = 0;470 Assert(pVCpu->iem.s.cActiveMappings == 0);471 pVCpu->iem.s.iNextMapping = 0;472 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);473 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));474 475 #ifdef DBGFTRACE_ENABLED476 switch (enmMode)477 {478 case IEMMODE_64BIT:479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);480 break;481 case IEMMODE_32BIT:482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);483 break;484 case IEMMODE_16BIT:485 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);486 break;487 }488 #endif489 }490 491 492 493 /**494 * Prefetch opcodes the first time when starting executing.495 *496 * @returns Strict VBox status code.497 * @param pVCpu The cross context virtual CPU structure of the498 * calling thread.499 * @param fExecOpts Optional execution flags:500 * - IEM_F_BYPASS_HANDLERS501 * - IEM_F_X86_DISREGARD_LOCK502 */503 static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT504 {505 iemInitDecoder(pVCpu, fExecOpts);506 507 #ifndef IEM_WITH_CODE_TLB508 /*509 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.510 *511 * First translate CS:rIP to a physical address.512 *513 * Note! 
The iemOpcodeFetchMoreBytes code depends on this here code to fetch514 * all relevant bytes from the first page, as it ASSUMES it's only ever515 * called for dealing with CS.LIM, page crossing and instructions that516 * are too long.517 */518 uint32_t cbToTryRead;519 RTGCPTR GCPtrPC;520 if (IEM_IS_64BIT_CODE(pVCpu))521 {522 cbToTryRead = GUEST_PAGE_SIZE;523 GCPtrPC = pVCpu->cpum.GstCtx.rip;524 if (IEM_IS_CANONICAL(GCPtrPC))525 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);526 else527 return iemRaiseGeneralProtectionFault0(pVCpu);528 }529 else530 {531 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;532 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));533 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)534 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;535 else536 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);537 if (cbToTryRead) { /* likely */ }538 else /* overflowed */539 {540 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);541 cbToTryRead = UINT32_MAX;542 }543 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;544 Assert(GCPtrPC <= UINT32_MAX);545 }546 547 PGMPTWALKFAST WalkFast;548 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,549 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,550 &WalkFast);551 if (RT_SUCCESS(rc))552 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);553 else554 {555 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));556 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT557 /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't558 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */559 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)560 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);561 # endif562 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);563 }564 #if 0565 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }566 else567 {568 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));569 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT570 /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/571 # error completely wrong572 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)573 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);574 # endif575 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);576 }577 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }578 else579 {580 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));581 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT582 /** @todo this is completely wrong for EPT. 
WalkFast.fFailed is always zero here!*/583 # error completely wrong.584 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)585 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);586 # endif587 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);588 }589 #else590 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);591 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));592 #endif593 RTGCPHYS const GCPhys = WalkFast.GCPhys;594 595 /*596 * Read the bytes at this address.597 */598 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);599 if (cbToTryRead > cbLeftOnPage)600 cbToTryRead = cbLeftOnPage;601 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))602 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);603 604 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))605 {606 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);607 if (RT_LIKELY(rcStrict == VINF_SUCCESS))608 { /* likely */ }609 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))610 {611 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",612 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));613 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);614 }615 else616 {617 Log((RT_SUCCESS(rcStrict)618 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"619 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",620 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));621 return rcStrict;622 }623 }624 else625 {626 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);627 if (RT_SUCCESS(rc))628 { /* likely */ }629 else630 {631 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",632 GCPtrPC, GCPhys, rc, cbToTryRead));633 return rc;634 }635 }636 pVCpu->iem.s.cbOpcode = cbToTryRead;637 #endif /* !IEM_WITH_CODE_TLB */638 return VINF_SUCCESS;639 }640 641 642 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)643 /**644 * Helper for doing large page accounting at TLB load time.645 */646 template<bool const a_fGlobal>647 DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)648 {649 if (a_fGlobal)650 pTlb->cTlbGlobalLargePageCurLoads++;651 else652 pTlb->cTlbNonGlobalLargePageCurLoads++;653 654 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP655 RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;656 ASMBitSet(pTlb->bmLargePage, idxBit);657 # endif658 659 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);660 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;661 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal662 ? 
&pTlb->GlobalLargePageRange663 : &pTlb->NonGlobalLargePageRange;664 uTagNoRev &= ~(RTGCPTR)fMask;665 if (uTagNoRev < pRange->uFirstTag)666 pRange->uFirstTag = uTagNoRev;667 668 uTagNoRev |= fMask;669 if (uTagNoRev > pRange->uLastTag)670 pRange->uLastTag = uTagNoRev;671 672 RT_NOREF_PV(pVCpu);673 }674 #endif675 676 677 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)678 /**679 * Worker for iemTlbInvalidateAll.680 */681 template<bool a_fGlobal>682 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)683 {684 if (!a_fGlobal)685 pTlb->cTlsFlushes++;686 else687 pTlb->cTlsGlobalFlushes++;688 689 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;690 if (RT_LIKELY(pTlb->uTlbRevision != 0))691 { /* very likely */ }692 else693 {694 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;695 pTlb->cTlbRevisionRollovers++;696 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;697 while (i-- > 0)698 pTlb->aEntries[i * 2].uTag = 0;699 }700 701 pTlb->cTlbNonGlobalLargePageCurLoads = 0;702 pTlb->NonGlobalLargePageRange.uLastTag = 0;703 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;704 705 if (a_fGlobal)706 {707 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;708 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))709 { /* very likely */ }710 else711 {712 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;713 pTlb->cTlbRevisionRollovers++;714 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;715 while (i-- > 0)716 pTlb->aEntries[i * 2 + 1].uTag = 0;717 }718 719 pTlb->cTlbGlobalLargePageCurLoads = 0;720 pTlb->GlobalLargePageRange.uLastTag = 0;721 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;722 }723 }724 #endif725 726 727 /**728 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.729 */730 template<bool a_fGlobal>731 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)732 {733 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)734 Log10(("IEMTlbInvalidateAll\n"));735 736 # ifdef IEM_WITH_CODE_TLB737 pVCpu->iem.s.cbInstrBufTotal = 0;738 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);739 if (a_fGlobal)740 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);741 else742 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);743 # endif744 745 # ifdef IEM_WITH_DATA_TLB746 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);747 if (a_fGlobal)748 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);749 else750 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);751 # endif752 #else753 RT_NOREF(pVCpu);754 #endif755 }756 757 758 /**759 * Invalidates non-global the IEM TLB entries.760 *761 * This is called internally as well as by PGM when moving GC mappings.762 *763 * @param pVCpu The cross context virtual CPU structure of the calling764 * thread.765 */766 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)767 {768 iemTlbInvalidateAll<false>(pVCpu);769 }770 771 772 /**773 * Invalidates all the IEM TLB entries.774 *775 * This is called internally as well as by PGM when moving GC mappings.776 *777 * @param pVCpu The cross context virtual CPU structure of the calling778 * thread.779 */780 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)781 {782 iemTlbInvalidateAll<true>(pVCpu);783 }784 785 786 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)787 788 /** @todo graduate this to cdefs.h or asm-mem.h. 
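/*
 * Illustrative standalone sketch (not VBox code; simplified, invented names) of the
 * revision trick used by iemTlbInvalidateOne above: every valid entry stores its tag
 * OR'ed with the revision that loaded it, so bumping the revision invalidates the
 * whole half in O(1), and only the rare rollover has to walk the table and zero tags.
 */
#include <cstddef>
#include <cstdint>

static constexpr uint64_t g_SketchRevIncr = UINT64_C(1) << 32; /* revision lives above the tag bits */

struct SketchTlb
{
    uint64_t uRevision = g_SketchRevIncr;
    uint64_t aTags[256] = {};                   /* tag | revision; zero means never loaded */

    void flushAll()                             /* O(1) flush: move on to a new revision */
    {
        uRevision += g_SketchRevIncr;
        if (uRevision == 0)                     /* rollover: stale tags could match again, */
        {                                       /* so clear them the hard way */
            uRevision = g_SketchRevIncr;
            for (size_t i = 0; i < 256; i++)
                aTags[i] = 0;
        }
    }

    bool isHit(size_t idx, uint64_t uPageTag) const
    {
        return aTags[idx] == (uPageTag | uRevision);
    }
};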
*/789 # ifdef RT_ARCH_ARM64 /** @todo RT_CACHELINE_SIZE is wrong for M1 */790 # undef RT_CACHELINE_SIZE791 # define RT_CACHELINE_SIZE 128792 # endif793 794 # if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))795 # define MY_PREFETCH(a_pvAddr) _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)796 # elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))797 # define MY_PREFETCH(a_pvAddr) __prefetch((a_pvAddr))798 # elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)799 # define MY_PREFETCH(a_pvAddr) __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)800 # else801 # define MY_PREFETCH(a_pvAddr) ((void)0)802 # endif803 # if 0804 # undef MY_PREFETCH805 # define MY_PREFETCH(a_pvAddr) ((void)0)806 # endif807 808 /** @def MY_PREFETCH_64809 * 64 byte prefetch hint, could be more depending on cache line size. */810 /** @def MY_PREFETCH_128811 * 128 byte prefetch hint. */812 /** @def MY_PREFETCH_256813 * 256 byte prefetch hint. */814 # if RT_CACHELINE_SIZE >= 128815 /* 128 byte cache lines */816 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)817 # define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)818 # define MY_PREFETCH_256(a_pvAddr) do { \819 MY_PREFETCH(a_pvAddr); \820 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \821 } while (0)822 # else823 /* 64 byte cache lines */824 # define MY_PREFETCH_64(a_pvAddr) MY_PREFETCH(a_pvAddr)825 # define MY_PREFETCH_128(a_pvAddr) do { \826 MY_PREFETCH(a_pvAddr); \827 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \828 } while (0)829 # define MY_PREFETCH_256(a_pvAddr) do { \830 MY_PREFETCH(a_pvAddr); \831 MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \832 MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \833 MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \834 } while (0)835 # endif836 837 template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>838 DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,839 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT840 {841 IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);842 AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */843 844 if (a_fGlobal)845 pTlb->cTlbInvlPgLargeGlobal += 1;846 if (a_fNonGlobal)847 pTlb->cTlbInvlPgLargeNonGlobal += 1;848 849 /*850 * Set up the scan.851 *852 * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256 TLB will map853 * offset zero and offset 1MB to the same slot pair. Our GCPtrTag[Globl]854 * values are for the range 0-1MB, or slots 0-256. So, we construct a mask855 * that fold large page offsets 1MB-2MB into the 0-1MB range.856 *857 * For our example with 2MB pages and a 256 entry TLB: 0xfffffffffffffeff858 *859 * MY_PREFETCH: Hope that prefetching 256 bytes at the time is okay for860 * relevant host architectures.861 */862 /** @todo benchmark this code from the guest side. */863 bool const fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);864 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP865 uintptr_t idxBitmap = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;866 uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64867 : IEMTLB_ENTRY_COUNT * 2 / 64;868 #else869 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;870 MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);871 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + ((a_f2MbLargePage ? 
512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;872 #endif873 RTGCPTR const GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0874 : ~(RTGCPTR)( (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)875 & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));876 877 /*878 * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.879 * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.880 */881 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);882 if ( !a_fDataTlb883 && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))884 pVCpu->iem.s.cbInstrBufTotal = 0;885 886 /*887 * Combine TAG values with the TLB revisions.888 */889 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;890 if (a_fNonGlobal)891 GCPtrTag |= pTlb->uTlbRevision;892 893 /*894 * Do the scanning.895 */896 #ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP897 uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX898 : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);899 /* Scan bitmap entries (64 bits at the time): */900 for (;;)901 {902 # if 1903 uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;904 if (bmEntry)905 {906 /* Scan the non-zero 64-bit value in groups of 8 bits: */907 uint64_t bmToClear = 0;908 uintptr_t idxEven = idxBitmap * 64;909 uint32_t idxTag = 0;910 for (;;)911 {912 if (bmEntry & 0xff)913 {914 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \915 if (a_fNonGlobal) \916 { \917 if (bmEntry & a_bmNonGlobal) \918 { \919 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \920 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \921 { \922 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \923 pTlb->aEntries[a_idxEvenIter].GCPhys, \924 a_idxEvenIter, a_fDataTlb); \925 pTlb->aEntries[a_idxEvenIter].uTag = 0; \926 bmToClearSub8 |= a_bmNonGlobal; \927 } \928 } \929 else \930 Assert( !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\931 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \932 != (GCPtrTag & IEMTLB_REVISION_MASK)); \933 } \934 if (a_fGlobal) \935 { \936 if (bmEntry & a_bmGlobal) \937 { \938 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \939 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \940 { \941 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \942 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \943 a_idxEvenIter + 1, a_fDataTlb); \944 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \945 bmToClearSub8 |= a_bmGlobal; \946 } \947 } \948 else \949 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\950 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \951 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \952 }953 uint64_t bmToClearSub8 = 0;954 ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)955 ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)956 ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)957 ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)958 bmToClear |= bmToClearSub8 << (idxTag * 2);959 # undef ONE_PAIR960 }961 962 /* advance to the next 8 bits. */963 bmEntry >>= 8;964 if (!bmEntry)965 break;966 idxEven += 8;967 idxTag += 4;968 }969 970 /* Clear the large page flags we covered. 
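/*
 * Illustrative standalone sketch (not VBox code) of the tag mask computed above for the
 * partial large-page scan.  With 4K slots, a 2MB page spans 512 consecutive tags, so a
 * TLB with fewer entry pairs folds several of those tags onto the same slot pair; the
 * tag bits the index cannot distinguish are therefore masked out before comparing.
 */
#include <cstdint>

static constexpr uint64_t sketchLargePageTagMask(unsigned cLargePageTagBits /* 9 = 2MB, 10 = 4MB */,
                                                 unsigned cTlbIndexBits     /* log2(entry pairs) */)
{
    return ~(uint64_t)(  ((UINT32_C(1) << cLargePageTagBits) - 1U)
                       & ~((UINT32_C(1) << cTlbIndexBits) - 1U));
}

/* The example from the comment above: 2MB large pages and a 256 entry TLB. */
static_assert(sketchLargePageTagMask(9, 8) == UINT64_C(0xfffffffffffffeff),
              "offsets 1MB-2MB fold onto the 0-1MB slot range");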
*/971 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;972 }973 # else974 uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;975 if (bmEntry)976 {977 /* Scan the non-zero 64-bit value completely unrolled: */978 uintptr_t const idxEven = idxBitmap * 64;979 uint64_t bmToClear = 0;980 # define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \981 if (a_fNonGlobal) \982 { \983 if (bmEntry & a_bmNonGlobal) \984 { \985 Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \986 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \987 { \988 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \989 pTlb->aEntries[a_idxEvenIter].GCPhys, \990 a_idxEvenIter, a_fDataTlb); \991 pTlb->aEntries[a_idxEvenIter].uTag = 0; \992 bmToClear |= a_bmNonGlobal; \993 } \994 } \995 else \996 Assert( !(pTlb->aEntriqes[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\997 || (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \998 != (GCPtrTag & IEMTLB_REVISION_MASK)); \999 } \1000 if (a_fGlobal) \1001 { \1002 if (bmEntry & a_bmGlobal) \1003 { \1004 Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \1005 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \1006 { \1007 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \1008 pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1009 a_idxEvenIter + 1, a_fDataTlb); \1010 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1011 bmToClear |= a_bmGlobal; \1012 } \1013 } \1014 else \1015 Assert( !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\1016 || (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \1017 != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \1018 } ((void)0)1019 # define FOUR_PAIRS(a_iByte, a_cShift) \1020 ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \1021 ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \1022 ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \1023 ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)1024 if (bmEntry & (uint32_t)UINT16_MAX)1025 {1026 FOUR_PAIRS(0, 0);1027 FOUR_PAIRS(1, 8);1028 }1029 if (bmEntry & ((uint32_t)UINT16_MAX << 16))1030 {1031 FOUR_PAIRS(2, 16);1032 FOUR_PAIRS(3, 24);1033 }1034 if (bmEntry & ((uint64_t)UINT16_MAX << 32))1035 {1036 FOUR_PAIRS(4, 32);1037 FOUR_PAIRS(5, 40);1038 }1039 if (bmEntry & ((uint64_t)UINT16_MAX << 16))1040 {1041 FOUR_PAIRS(6, 48);1042 FOUR_PAIRS(7, 56);1043 }1044 # undef FOUR_PAIRS1045 1046 /* Clear the large page flags we covered. 
*/1047 pTlb->bmLargePage[idxBitmap] &= ~bmToClear;1048 }1049 # endif1050 1051 /* advance */1052 idxBitmap++;1053 if (idxBitmap >= idxBitmapEnd)1054 break;1055 if (a_fNonGlobal)1056 GCPtrTag += 32;1057 if (a_fGlobal)1058 GCPtrTagGlob += 32;1059 }1060 1061 #else /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1062 1063 for (; idxEven < idxEvenEnd; idxEven += 8)1064 {1065 # define ONE_ITERATION(a_idxEvenIter) \1066 if (a_fNonGlobal) \1067 { \1068 if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \1069 { \1070 if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1071 { \1072 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \1073 a_idxEvenIter, a_fDataTlb); \1074 pTlb->aEntries[a_idxEvenIter].uTag = 0; \1075 } \1076 } \1077 GCPtrTag++; \1078 } \1079 \1080 if (a_fGlobal) \1081 { \1082 if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \1083 { \1084 if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \1085 { \1086 IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \1087 a_idxEvenIter + 1, a_fDataTlb); \1088 pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \1089 } \1090 } \1091 GCPtrTagGlob++; \1092 }1093 if (idxEven < idxEvenEnd - 4)1094 MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);1095 ONE_ITERATION(idxEven)1096 ONE_ITERATION(idxEven + 2)1097 ONE_ITERATION(idxEven + 4)1098 ONE_ITERATION(idxEven + 6)1099 # undef ONE_ITERATION1100 }1101 #endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */1102 }1103 1104 template<bool const a_fDataTlb, bool const a_f2MbLargePage>1105 DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,1106 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT1107 {1108 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);1109 1110 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);1111 if ( GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag1112 && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)1113 {1114 if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1115 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1116 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1117 else1118 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);1119 }1120 else if ( GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag1121 || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)1122 {1123 /* Large pages aren't as likely in the non-global TLB half. 
 */
        IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
    }
    else
        iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
}

template<bool const a_fDataTlb>
DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
{
    pTlb->cTlbInvlPg += 1;

    /*
     * Flush the entry pair.
     */
    if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
        pTlb->aEntries[idxEven].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }
    if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
        pTlb->aEntries[idxEven + 1].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }

    /*
     * If there are (or have been) large pages in the TLB, we must check if the
     * address being flushed may involve one of those, as then we'd have to
     * scan for entries relating to the same page and flush those as well.
     */
# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
    if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
# else
    if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
# endif
    {
        RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
}

#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */

/**
 * Invalidates a page in the TLBs.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   GCPtr   The address of the page to invalidate
 * @thread EMT(pVCpu)
 */
VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);

# ifdef IEM_WITH_CODE_TLB
    iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
# endif
# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
# endif
#else
    NOREF(pVCpu); NOREF(GCPtr);
#endif
}


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Invalidates both TLBs the slow way following a rollover.
 *
 * Worker for IEMTlbInvalidateAllPhysical,
 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
 * iemMemMapJmp and others.
 *
 * @thread EMT(pVCpu)
 */
static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
{
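/*
 * Illustrative standalone sketch (not VBox code; invented names) of the single-page
 * invalidation done by iemTlbInvalidatePageWorker above: each tag indexes an even/odd
 * slot pair, the even slot holding the non-global entry and the odd slot the global
 * one, and a slot only counts as valid when stamped with the matching revision.
 */
#include <cstddef>
#include <cstdint>

struct SketchEntry { uint64_t uTag; };

static void sketchInvalidatePage(SketchEntry *paEntries, size_t cEntryPairs /* power of two */,
                                 uint64_t uPageTag, uint64_t uRev, uint64_t uRevGlobal)
{
    size_t const idxEven = (uPageTag & (cEntryPairs - 1)) * 2;   /* tag -> even slot of its pair */
    if (paEntries[idxEven].uTag == (uPageTag | uRev))            /* non-global entry */
        paEntries[idxEven].uTag = 0;
    if (paEntries[idxEven + 1].uTag == (uPageTag | uRevGlobal))  /* global entry */
        paEntries[idxEven + 1].uTag = 0;
}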
Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));1216 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1217 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);1218 1219 unsigned i;1220 # ifdef IEM_WITH_CODE_TLB1221 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);1222 while (i-- > 0)1223 {1224 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;1225 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1226 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1227 }1228 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;1229 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1230 # endif1231 # ifdef IEM_WITH_DATA_TLB1232 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);1233 while (i-- > 0)1234 {1235 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;1236 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ1237 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);1238 }1239 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;1240 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1241 # endif1242 1243 }1244 #endif1245 1246 1247 /**1248 * Invalidates the host physical aspects of the IEM TLBs.1249 *1250 * This is called internally as well as by PGM when moving GC mappings.1251 *1252 * @param pVCpu The cross context virtual CPU structure of the calling1253 * thread.1254 * @note Currently not used.1255 */1256 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)1257 {1258 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1259 /* Note! This probably won't end up looking exactly like this, but it give an idea... */1260 Log10(("IEMTlbInvalidateAllPhysical\n"));1261 1262 # ifdef IEM_WITH_CODE_TLB1263 pVCpu->iem.s.cbInstrBufTotal = 0;1264 # endif1265 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;1266 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))1267 {1268 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;1269 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1270 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;1271 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1272 }1273 else1274 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1275 #else1276 NOREF(pVCpu);1277 #endif1278 }1279 1280 1281 /**1282 * Invalidates the host physical aspects of the IEM TLBs.1283 *1284 * This is called internally as well as by PGM when moving GC mappings.1285 *1286 * @param pVM The cross context VM structure.1287 * @param idCpuCaller The ID of the calling EMT if available to the caller,1288 * otherwise NIL_VMCPUID.1289 * @param enmReason The reason we're called.1290 *1291 * @remarks Caller holds the PGM lock.1292 */1293 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)1294 {1295 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)1296 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? 
VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);1297 if (pVCpuCaller)1298 VMCPU_ASSERT_EMT(pVCpuCaller);1299 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);1300 1301 VMCC_FOR_EACH_VMCPU(pVM)1302 {1303 # ifdef IEM_WITH_CODE_TLB1304 if (pVCpuCaller == pVCpu)1305 pVCpu->iem.s.cbInstrBufTotal = 0;1306 # endif1307 1308 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);1309 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;1310 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))1311 { /* likely */}1312 else if (pVCpuCaller != pVCpu)1313 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;1314 else1315 {1316 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1317 continue;1318 }1319 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1320 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;1321 1322 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))1323 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;1324 }1325 VMCC_FOR_EACH_VMCPU_END(pVM);1326 1327 #else1328 RT_NOREF(pVM, idCpuCaller, enmReason);1329 #endif1330 }1331 1332 1333 /**1334 * Flushes the prefetch buffer, light version.1335 */1336 void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)1337 {1338 #ifndef IEM_WITH_CODE_TLB1339 pVCpu->iem.s.cbOpcode = cbInstr;1340 #else1341 RT_NOREF(pVCpu, cbInstr);1342 #endif1343 }1344 1345 1346 /**1347 * Flushes the prefetch buffer, heavy version.1348 */1349 void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)1350 {1351 #ifndef IEM_WITH_CODE_TLB1352 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */1353 #elif 11354 pVCpu->iem.s.cbInstrBufTotal = 0;1355 RT_NOREF(cbInstr);1356 #else1357 RT_NOREF(pVCpu, cbInstr);1358 #endif1359 }1360 1361 1362 1363 #ifdef IEM_WITH_CODE_TLB1364 1365 /**1366 * Tries to fetches @a cbDst opcode bytes, raise the appropriate exception on1367 * failure and jumps.1368 *1369 * We end up here for a number of reasons:1370 * - pbInstrBuf isn't yet initialized.1371 * - Advancing beyond the buffer boundrary (e.g. cross page).1372 * - Advancing beyond the CS segment limit.1373 * - Fetching from non-mappable page (e.g. MMIO).1374 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).1375 *1376 * @param pVCpu The cross context virtual CPU structure of the1377 * calling thread.1378 * @param pvDst Where to return the bytes.1379 * @param cbDst Number of bytes to read. A value of zero is1380 * allowed for initializing pbInstrBuf (the1381 * recompiler does this). In this case it is best1382 * to set pbInstrBuf to NULL prior to the call.1383 */1384 void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP1385 {1386 # ifdef IN_RING31387 for (;;)1388 {1389 Assert(cbDst <= 8);1390 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;1391 1392 /*1393 * We might have a partial buffer match, deal with that first to make the1394 * rest simpler. 
This is the first part of the cross page/buffer case.1395 */1396 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;1397 if (pbInstrBuf != NULL)1398 {1399 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */1400 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;1401 if (offBuf < cbInstrBuf)1402 {1403 Assert(offBuf + cbDst > cbInstrBuf);1404 uint32_t const cbCopy = cbInstrBuf - offBuf;1405 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);1406 1407 cbDst -= cbCopy;1408 pvDst = (uint8_t *)pvDst + cbCopy;1409 offBuf += cbCopy;1410 }1411 }1412 1413 /*1414 * Check segment limit, figuring how much we're allowed to access at this point.1415 *1416 * We will fault immediately if RIP is past the segment limit / in non-canonical1417 * territory. If we do continue, there are one or more bytes to read before we1418 * end up in trouble and we need to do that first before faulting.1419 */1420 RTGCPTR GCPtrFirst;1421 uint32_t cbMaxRead;1422 if (IEM_IS_64BIT_CODE(pVCpu))1423 {1424 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1425 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))1426 { /* likely */ }1427 else1428 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1429 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1430 }1431 else1432 {1433 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);1434 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1435 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))1436 { /* likely */ }1437 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */1438 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1439 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;1440 if (cbMaxRead != 0)1441 { /* likely */ }1442 else1443 {1444 /* Overflowed because address is 0 and limit is max. */1445 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1446 cbMaxRead = X86_PAGE_SIZE;1447 }1448 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;1449 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);1450 if (cbMaxRead2 < cbMaxRead)1451 cbMaxRead = cbMaxRead2;1452 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */1453 }1454 1455 /*1456 * Get the TLB entry for this piece of code.1457 */1458 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);1459 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);1460 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)1461 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))1462 {1463 /* likely when executing lots of code, otherwise unlikely */1464 # ifdef IEM_WITH_TLB_STATISTICS1465 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;1466 # endif1467 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1468 1469 /* Check TLB page table level access flags. 
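/*
 * Illustrative standalone sketch (not VBox code; invented names) of the cross-buffer
 * handling above: whatever is still valid in the current instruction buffer is copied
 * out first, and any further read is clamped so it never crosses a 4K page boundary
 * (the real code additionally clamps to the CS limit in non-64-bit modes).
 */
#include <cstddef>
#include <cstdint>
#include <cstring>

static size_t sketchTakeFromBuffer(uint8_t *pbDst, size_t cbDst,
                                   uint8_t const *pbBuf, uint32_t offBuf, uint32_t cbBuf)
{
    size_t cbCopy = 0;
    if (pbBuf && offBuf < cbBuf)                        /* partial match with the current buffer */
    {
        cbCopy = cbBuf - offBuf;
        if (cbCopy > cbDst)
            cbCopy = cbDst;
        memcpy(pbDst, &pbBuf[offBuf], cbCopy);
    }
    return cbCopy;                                      /* the caller fetches cbDst - cbCopy more */
}

static uint32_t sketchMaxReadBeforePageEnd(uint64_t GCPtr)
{
    return 0x1000 - (uint32_t)(GCPtr & 0xfff);          /* bytes left on the current 4K page */
}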
 */
            if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
            {
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
                {
                    Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
                {
                    Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
            }

            /* Look up the physical page info if necessary. */
            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
            { /* not necessary */ }
            else
            {
                if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
                { /* likely */ }
                else
                    IEMTlbInvalidateAllPhysicalSlow(pVCpu);
                pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
                int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                                    &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
                AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
            }
        }
        else
        {
            pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;

            /* This page table walking will set A bits as required by the access while performing the walk.
               ASSUMES these are set when the address is translated rather than on commit... */
            /** @todo testcase: check when A bits are actually set by the CPU for code. */
            PGMPTWALKFAST WalkFast;
            int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
                                         IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                         &WalkFast);
            if (RT_SUCCESS(rc))
                Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
            else
            {
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
                /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
                Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
# endif
                Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
            }

            AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0.
*/1525 {1526 pTlbe--;1527 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;1528 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1529 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1530 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1531 else1532 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));1533 # endif1534 }1535 else1536 {1537 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;1538 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;1539 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)1540 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));1541 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP1542 else1543 ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);1544 # endif1545 }1546 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))1547 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/1548 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);1549 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;1550 pTlbe->GCPhys = GCPhysPg;1551 pTlbe->pbMappingR3 = NULL;1552 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1553 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);1554 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));1555 1556 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))1557 IEMTLBTRACE_LOAD( pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1558 else1559 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);1560 1561 /* Resolve the physical address. */1562 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))1563 { /* likely */ }1564 else1565 IEMTlbInvalidateAllPhysicalSlow(pVCpu);1566 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));1567 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,1568 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);1569 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));1570 }1571 1572 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */1573 /*1574 * Try do a direct read using the pbMappingR3 pointer.1575 * Note! Do not recheck the physical TLB revision number here as we have the1576 * wrong response to changes in the else case. 
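/*
 * Illustrative standalone sketch (not VBox code; invented names) of the slot choice made
 * above when a TLB miss is filled: non-global translations go into the even slot stamped
 * with the non-global revision, while PTE.G=1 translations (only trusted in ring-0 here)
 * go into the odd slot stamped with the global revision, so the two halves can be
 * flushed independently of each other.
 */
#include <cstddef>
#include <cstdint>

static size_t sketchSlotForLoad(size_t idxEven, bool fGlobalPte, unsigned uCpl,
                                uint64_t uPageTag, uint64_t uRev, uint64_t uRevGlobal,
                                uint64_t *puTag)
{
    if (fGlobalPte && uCpl == 0)        /* only use PTE.G=1 entries in ring-0 */
    {
        *puTag = uPageTag | uRevGlobal;
        return idxEven + 1;             /* odd slot: global */
    }
    *puTag = uPageTag | uRev;
    return idxEven;                     /* even slot: non-global */
}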
If someone is updating1577 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine1578 * pretending we always won the race.1579 */1580 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))1581 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)1582 {1583 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1584 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;1585 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)1586 {1587 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);1588 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;1589 }1590 else1591 {1592 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1593 if (cbInstr + (uint32_t)cbDst <= 15)1594 {1595 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;1596 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1597 }1598 else1599 {1600 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1601 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1602 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1603 }1604 }1605 if (cbDst <= cbMaxRead)1606 {1607 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */1608 # if 0 /* unused */1609 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1610 # endif1611 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;1612 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1613 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1614 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;1615 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */1616 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);1617 else1618 Assert(!pvDst);1619 return;1620 }1621 pVCpu->iem.s.pbInstrBuf = NULL;1622 1623 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);1624 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;1625 }1626 # else1627 # error "refactor as needed"1628 /*1629 * If there is no special read handling, so we can read a bit more and1630 * put it in the prefetch buffer.1631 */1632 if ( cbDst < cbMaxRead1633 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)1634 {1635 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,1636 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);1637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1638 { /* likely */ }1639 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1640 {1641 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1642 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1643 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1644 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));1645 }1646 else1647 {1648 Log((RT_SUCCESS(rcStrict)1649 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1650 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1651 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1652 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1653 }1654 }1655 # endif1656 /*1657 * Special read handling, so only read exactly what's needed.1658 * This is a highly unlikely scenario.1659 */1660 else1661 {1662 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;1663 1664 /* Check instruction length. 
*/1665 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;1666 if (RT_LIKELY(cbInstr + cbDst <= 15))1667 { /* likely */ }1668 else1669 {1670 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",1671 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));1672 iemRaiseGeneralProtectionFault0Jmp(pVCpu);1673 }1674 1675 /* Do the reading. */1676 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);1677 if (cbToRead > 0)1678 {1679 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),1680 pvDst, cbToRead, PGMACCESSORIGIN_IEM);1681 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1682 { /* likely */ }1683 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1684 {1685 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1686 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1687 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1688 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));1689 }1690 else1691 {1692 Log((RT_SUCCESS(rcStrict)1693 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1694 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1695 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));1696 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1697 }1698 }1699 1700 /* Update the state and probably return. */1701 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);1702 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;1703 # if 0 /* unused */1704 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;1705 # endif1706 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);1707 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;1708 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;1709 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? 
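/*
 * Illustrative standalone sketch (not VBox code) of the length check above: x86
 * instructions are at most 15 bytes long, so once the bytes already consumed for the
 * current instruction plus the bytes still wanted exceed that, #GP(0) is raised instead
 * of fetching any further.
 */
#include <cstdint>

static bool sketchWouldExceedMaxInstrLen(uint32_t cbAlreadyFetched, uint32_t cbWanted)
{
    return cbAlreadyFetched + cbWanted > 15;    /* true -> raise #GP(0) like the code above */
}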
*/1710 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;1711 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;1712 pVCpu->iem.s.pbInstrBuf = NULL;1713 if (cbToRead == cbDst)1714 return;1715 Assert(cbToRead == cbMaxRead);1716 }1717 1718 /*1719 * More to read, loop.1720 */1721 cbDst -= cbMaxRead;1722 pvDst = (uint8_t *)pvDst + cbMaxRead;1723 }1724 # else /* !IN_RING3 */1725 RT_NOREF(pvDst, cbDst);1726 if (pvDst || cbDst)1727 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);1728 # endif /* !IN_RING3 */1729 }1730 1731 #else /* !IEM_WITH_CODE_TLB */1732 1733 /**1734 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate1735 * exception if it fails.1736 *1737 * @returns Strict VBox status code.1738 * @param pVCpu The cross context virtual CPU structure of the1739 * calling thread.1740 * @param cbMin The minimum number of bytes relative offOpcode1741 * that must be read.1742 */1743 VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT1744 {1745 /*1746 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.1747 *1748 * First translate CS:rIP to a physical address.1749 */1750 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;1751 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;1752 uint8_t const cbLeft = cbOpcode - offOpcode;1753 Assert(cbLeft < cbMin);1754 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));1755 1756 uint32_t cbToTryRead;1757 RTGCPTR GCPtrNext;1758 if (IEM_IS_64BIT_CODE(pVCpu))1759 {1760 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;1761 if (!IEM_IS_CANONICAL(GCPtrNext))1762 return iemRaiseGeneralProtectionFault0(pVCpu);1763 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1764 }1765 else1766 {1767 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;1768 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */1769 GCPtrNext32 += cbOpcode;1770 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)1771 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */1772 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1773 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;1774 if (!cbToTryRead) /* overflowed */1775 {1776 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);1777 cbToTryRead = UINT32_MAX;1778 /** @todo check out wrapping around the code segment. */1779 }1780 if (cbToTryRead < cbMin - cbLeft)1781 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);1782 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;1783 1784 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);1785 if (cbToTryRead > cbLeftOnPage)1786 cbToTryRead = cbLeftOnPage;1787 }1788 1789 /* Restrict to opcode buffer space.1790 1791 We're making ASSUMPTIONS here based on work done previously in1792 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will1793 be fetched in case of an instruction crossing two pages. 
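/*
 * Illustrative standalone sketch (not VBox code; invented names) of the clamping the
 * comment above describes and the code below performs: a further opcode read must not
 * overflow the fixed opcode buffer and must not run past the end of the current page;
 * whichever limit is smaller wins.  Assumes cbAlreadyInBuf <= cbOpcodeBuf, as the real
 * code asserts.
 */
#include <cstdint>

static uint32_t sketchClampOpcodeRead(uint32_t cbWanted, uint32_t cbAlreadyInBuf,
                                      uint32_t cbLeftOnPage, uint32_t cbOpcodeBuf)
{
    if (cbWanted > cbOpcodeBuf - cbAlreadyInBuf)    /* space left in the opcode buffer */
        cbWanted = cbOpcodeBuf - cbAlreadyInBuf;
    if (cbWanted > cbLeftOnPage)                    /* stay within the current page */
        cbWanted = cbLeftOnPage;
    return cbWanted;
}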
*/1794 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)1795 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;1796 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))1797 { /* likely */ }1798 else1799 {1800 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",1801 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));1802 return iemRaiseGeneralProtectionFault0(pVCpu);1803 }1804 1805 PGMPTWALKFAST WalkFast;1806 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,1807 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,1808 &WalkFast);1809 if (RT_SUCCESS(rc))1810 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);1811 else1812 {1813 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));1814 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT1815 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)1816 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);1817 #endif1818 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);1819 }1820 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);1821 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));1822 1823 RTGCPHYS const GCPhys = WalkFast.GCPhys;1824 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));1825 1826 /*1827 * Read the bytes at this address.1828 *1829 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,1830 * and since PATM should only patch the start of an instruction there1831 * should be no need to check again here.1832 */1833 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))1834 {1835 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],1836 cbToTryRead, PGMACCESSORIGIN_IEM);1837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))1838 { /* likely */ }1839 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))1840 {1841 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",1842 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1843 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);1844 }1845 else1846 {1847 Log((RT_SUCCESS(rcStrict)1848 ? 
"iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"1849 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",1850 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));1851 return rcStrict;1852 }1853 }1854 else1855 {1856 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);1857 if (RT_SUCCESS(rc))1858 { /* likely */ }1859 else1860 {1861 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));1862 return rc;1863 }1864 }1865 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;1866 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));1867 1868 return VINF_SUCCESS;1869 }1870 1871 #endif /* !IEM_WITH_CODE_TLB */1872 #ifndef IEM_WITH_SETJMP1873 1874 /**1875 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.1876 *1877 * @returns Strict VBox status code.1878 * @param pVCpu The cross context virtual CPU structure of the1879 * calling thread.1880 * @param pb Where to return the opcode byte.1881 */1882 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT1883 {1884 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1885 if (rcStrict == VINF_SUCCESS)1886 {1887 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1888 *pb = pVCpu->iem.s.abOpcode[offOpcode];1889 pVCpu->iem.s.offOpcode = offOpcode + 1;1890 }1891 else1892 *pb = 0;1893 return rcStrict;1894 }1895 1896 #else /* IEM_WITH_SETJMP */1897 1898 /**1899 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.1900 *1901 * @returns The opcode byte.1902 * @param pVCpu The cross context virtual CPU structure of the calling thread.1903 */1904 uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP1905 {1906 # ifdef IEM_WITH_CODE_TLB1907 uint8_t u8;1908 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);1909 return u8;1910 # else1911 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);1912 if (rcStrict == VINF_SUCCESS)1913 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];1914 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));1915 # endif1916 }1917 1918 #endif /* IEM_WITH_SETJMP */1919 1920 #ifndef IEM_WITH_SETJMP1921 1922 /**1923 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.1924 *1925 * @returns Strict VBox status code.1926 * @param pVCpu The cross context virtual CPU structure of the calling thread.1927 * @param pu16 Where to return the opcode dword.1928 */1929 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1930 {1931 uint8_t u8;1932 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1933 if (rcStrict == VINF_SUCCESS)1934 *pu16 = (int8_t)u8;1935 return rcStrict;1936 }1937 1938 1939 /**1940 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.1941 *1942 * @returns Strict VBox status code.1943 * @param pVCpu The cross context virtual CPU structure of the calling thread.1944 * @param pu32 Where to return the opcode dword.1945 */1946 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT1947 {1948 uint8_t u8;1949 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1950 if (rcStrict == VINF_SUCCESS)1951 *pu32 = (int8_t)u8;1952 return rcStrict;1953 }1954 1955 1956 /**1957 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.1958 *1959 * @returns Strict VBox status code.1960 * @param pVCpu The cross context virtual CPU structure of the calling 
thread.1961 * @param pu64 Where to return the opcode qword.1962 */1963 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT1964 {1965 uint8_t u8;1966 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);1967 if (rcStrict == VINF_SUCCESS)1968 *pu64 = (int8_t)u8;1969 return rcStrict;1970 }1971 1972 #endif /* !IEM_WITH_SETJMP */1973 1974 1975 #ifndef IEM_WITH_SETJMP1976 1977 /**1978 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.1979 *1980 * @returns Strict VBox status code.1981 * @param pVCpu The cross context virtual CPU structure of the calling thread.1982 * @param pu16 Where to return the opcode word.1983 */1984 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT1985 {1986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);1987 if (rcStrict == VINF_SUCCESS)1988 {1989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;1990 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS1991 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];1992 # else1993 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);1994 # endif1995 pVCpu->iem.s.offOpcode = offOpcode + 2;1996 }1997 else1998 *pu16 = 0;1999 return rcStrict;2000 }2001 2002 #else /* IEM_WITH_SETJMP */2003 2004 /**2005 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error2006 *2007 * @returns The opcode word.2008 * @param pVCpu The cross context virtual CPU structure of the calling thread.2009 */2010 uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2011 {2012 # ifdef IEM_WITH_CODE_TLB2013 uint16_t u16;2014 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);2015 return u16;2016 # else2017 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2018 if (rcStrict == VINF_SUCCESS)2019 {2020 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2021 pVCpu->iem.s.offOpcode += 2;2022 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2023 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2024 # else2025 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2026 # endif2027 }2028 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2029 # endif2030 }2031 2032 #endif /* IEM_WITH_SETJMP */2033 2034 #ifndef IEM_WITH_SETJMP2035 2036 /**2037 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.2038 *2039 * @returns Strict VBox status code.2040 * @param pVCpu The cross context virtual CPU structure of the calling thread.2041 * @param pu32 Where to return the opcode double word.2042 */2043 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2044 {2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2046 if (rcStrict == VINF_SUCCESS)2047 {2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2049 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2050 pVCpu->iem.s.offOpcode = offOpcode + 2;2051 }2052 else2053 *pu32 = 0;2054 return rcStrict;2055 }2056 2057 2058 /**2059 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.2060 *2061 * @returns Strict VBox status code.2062 * @param pVCpu The cross context virtual CPU structure of the calling thread.2063 * @param pu64 Where to return the opcode quad word.2064 */2065 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2066 {2067 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);2068 if (rcStrict == VINF_SUCCESS)2069 {2070 uint8_t offOpcode = 
pVCpu->iem.s.offOpcode;2071 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);2072 pVCpu->iem.s.offOpcode = offOpcode + 2;2073 }2074 else2075 *pu64 = 0;2076 return rcStrict;2077 }2078 2079 #endif /* !IEM_WITH_SETJMP */2080 2081 #ifndef IEM_WITH_SETJMP2082 2083 /**2084 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.2085 *2086 * @returns Strict VBox status code.2087 * @param pVCpu The cross context virtual CPU structure of the calling thread.2088 * @param pu32 Where to return the opcode dword.2089 */2090 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT2091 {2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2093 if (rcStrict == VINF_SUCCESS)2094 {2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2096 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2097 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2098 # else2099 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2100 pVCpu->iem.s.abOpcode[offOpcode + 1],2101 pVCpu->iem.s.abOpcode[offOpcode + 2],2102 pVCpu->iem.s.abOpcode[offOpcode + 3]);2103 # endif2104 pVCpu->iem.s.offOpcode = offOpcode + 4;2105 }2106 else2107 *pu32 = 0;2108 return rcStrict;2109 }2110 2111 #else /* IEM_WITH_SETJMP */2112 2113 /**2114 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.2115 *2116 * @returns The opcode dword.2117 * @param pVCpu The cross context virtual CPU structure of the calling thread.2118 */2119 uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2120 {2121 # ifdef IEM_WITH_CODE_TLB2122 uint32_t u32;2123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);2124 return u32;2125 # else2126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2127 if (rcStrict == VINF_SUCCESS)2128 {2129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2130 pVCpu->iem.s.offOpcode = offOpcode + 4;2131 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2132 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2133 # else2134 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2135 pVCpu->iem.s.abOpcode[offOpcode + 1],2136 pVCpu->iem.s.abOpcode[offOpcode + 2],2137 pVCpu->iem.s.abOpcode[offOpcode + 3]);2138 # endif2139 }2140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2141 # endif2142 }2143 2144 #endif /* IEM_WITH_SETJMP */2145 2146 #ifndef IEM_WITH_SETJMP2147 2148 /**2149 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.2150 *2151 * @returns Strict VBox status code.2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.2153 * @param pu64 Where to return the opcode dword.2154 */2155 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2156 {2157 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2158 if (rcStrict == VINF_SUCCESS)2159 {2160 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2161 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2162 pVCpu->iem.s.abOpcode[offOpcode + 1],2163 pVCpu->iem.s.abOpcode[offOpcode + 2],2164 pVCpu->iem.s.abOpcode[offOpcode + 3]);2165 pVCpu->iem.s.offOpcode = offOpcode + 4;2166 }2167 else2168 *pu64 = 0;2169 return rcStrict;2170 }2171 2172 2173 /**2174 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.2175 *2176 * @returns Strict VBox status code.2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.2178 * @param pu64 Where to return the opcode qword.2179 */2180 VBOXSTRICTRC 
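Editor's aside (hedged sketch, not from the sources): the S8Sx*/U16Zx* slow getters in this hunk differ only in whether the fetched immediate is widened with or without its sign; the work is done by an intermediate signed cast, as in:

#include <cstdint>

/* Sign-extend an 8-bit immediate to 64 bits, the S8SxU64 pattern. */
static uint64_t signExtend8To64(uint8_t u8)
{
    return (uint64_t)(int64_t)(int8_t)u8;   /* 0x80 becomes 0xffffffffffffff80 */
}

/* Zero-extend a 16-bit immediate to 64 bits, the U16ZxU64 pattern. */
static uint64_t zeroExtend16To64(uint16_t u16)
{
    return u16;                             /* 0x8000 becomes 0x0000000000008000 */
}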
iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2181 {2182 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);2183 if (rcStrict == VINF_SUCCESS)2184 {2185 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2186 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2187 pVCpu->iem.s.abOpcode[offOpcode + 1],2188 pVCpu->iem.s.abOpcode[offOpcode + 2],2189 pVCpu->iem.s.abOpcode[offOpcode + 3]);2190 pVCpu->iem.s.offOpcode = offOpcode + 4;2191 }2192 else2193 *pu64 = 0;2194 return rcStrict;2195 }2196 2197 #endif /* !IEM_WITH_SETJMP */2198 2199 #ifndef IEM_WITH_SETJMP2200 2201 /**2202 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.2203 *2204 * @returns Strict VBox status code.2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.2206 * @param pu64 Where to return the opcode qword.2207 */2208 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT2209 {2210 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2211 if (rcStrict == VINF_SUCCESS)2212 {2213 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2214 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2215 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2216 # else2217 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2218 pVCpu->iem.s.abOpcode[offOpcode + 1],2219 pVCpu->iem.s.abOpcode[offOpcode + 2],2220 pVCpu->iem.s.abOpcode[offOpcode + 3],2221 pVCpu->iem.s.abOpcode[offOpcode + 4],2222 pVCpu->iem.s.abOpcode[offOpcode + 5],2223 pVCpu->iem.s.abOpcode[offOpcode + 6],2224 pVCpu->iem.s.abOpcode[offOpcode + 7]);2225 # endif2226 pVCpu->iem.s.offOpcode = offOpcode + 8;2227 }2228 else2229 *pu64 = 0;2230 return rcStrict;2231 }2232 2233 #else /* IEM_WITH_SETJMP */2234 2235 /**2236 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.2237 *2238 * @returns The opcode qword.2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.2240 */2241 uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP2242 {2243 # ifdef IEM_WITH_CODE_TLB2244 uint64_t u64;2245 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);2246 return u64;2247 # else2248 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);2249 if (rcStrict == VINF_SUCCESS)2250 {2251 uint8_t offOpcode = pVCpu->iem.s.offOpcode;2252 pVCpu->iem.s.offOpcode = offOpcode + 8;2253 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS2254 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];2255 # else2256 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],2257 pVCpu->iem.s.abOpcode[offOpcode + 1],2258 pVCpu->iem.s.abOpcode[offOpcode + 2],2259 pVCpu->iem.s.abOpcode[offOpcode + 3],2260 pVCpu->iem.s.abOpcode[offOpcode + 4],2261 pVCpu->iem.s.abOpcode[offOpcode + 5],2262 pVCpu->iem.s.abOpcode[offOpcode + 6],2263 pVCpu->iem.s.abOpcode[offOpcode + 7]);2264 # endif2265 }2266 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));2267 # endif2268 }2269 2270 #endif /* IEM_WITH_SETJMP */2271 2272 2273 2274 103 /** @name Misc Worker Functions. 
2275 104 * @{ … … 5210 3039 5211 3040 5212 /** @} */5213 5214 /** @name Common opcode decoders.5215 * @{5216 */5217 //#include <iprt/mem.h>5218 5219 /**5220 * Used to add extra details about a stub case.5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.5222 */5223 void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT5224 {5225 #if defined(LOG_ENABLED) && defined(IN_RING3)5226 PVM pVM = pVCpu->CTX_SUFF(pVM);5227 char szRegs[4096];5228 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),5229 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"5230 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"5231 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"5232 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"5233 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"5234 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"5235 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"5236 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"5237 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"5238 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"5239 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"5240 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"5241 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"5242 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"5243 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"5244 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"5245 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"5246 " efer=%016VR{efer}\n"5247 " pat=%016VR{pat}\n"5248 " sf_mask=%016VR{sf_mask}\n"5249 "krnl_gs_base=%016VR{krnl_gs_base}\n"5250 " lstar=%016VR{lstar}\n"5251 " star=%016VR{star} cstar=%016VR{cstar}\n"5252 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"5253 );5254 5255 char szInstr[256];5256 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,5257 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,5258 szInstr, sizeof(szInstr), NULL);5259 5260 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);5261 #else5262 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);5263 #endif5264 }5265 5266 /** @} */5267 5268 5269 5270 /** @name Register Access.5271 * @{5272 */5273 5274 /**5275 * Adds a 8-bit signed jump offset to RIP/EIP/IP.5276 *5277 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5278 * segment limit.5279 *5280 * @param pVCpu The cross context virtual CPU structure of the calling thread.5281 * @param cbInstr Instruction size.5282 * @param offNextInstr The offset of the next instruction.5283 * @param enmEffOpSize Effective operand size.5284 */5285 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,5286 IEMMODE enmEffOpSize) RT_NOEXCEPT5287 {5288 switch (enmEffOpSize)5289 {5290 case IEMMODE_16BIT:5291 {5292 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;5293 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5294 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))5295 
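Editor's aside (a minimal sketch, assuming nothing beyond what the code here shows): the 16-bit branch case relies on uint16_t wrap-around for the new IP and then checks it against the code-segment limit; only 64-bit code skips the limit check in favour of the canonical-address check further down. Roughly:

#include <cstdint>

/* Compute the target IP of a 16-bit relative jump and check it against the
 * CS limit.  Returns true if the jump is allowed, false for the #GP(0) case. */
static bool relJump16Ok(uint16_t ip, uint8_t cbInstr, int16_t offRel, uint32_t csLimit, uint16_t *pNewIp)
{
    uint16_t const uNewIp = (uint16_t)(ip + cbInstr + offRel);  /* wraps modulo 64K */
    *pNewIp = uNewIp;
    return uNewIp <= csLimit;
}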
pVCpu->cpum.GstCtx.rip = uNewIp;5296 else5297 return iemRaiseGeneralProtectionFault0(pVCpu);5298 break;5299 }5300 5301 case IEMMODE_32BIT:5302 {5303 Assert(!IEM_IS_64BIT_CODE(pVCpu));5304 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);5305 5306 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;5307 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5308 pVCpu->cpum.GstCtx.rip = uNewEip;5309 else5310 return iemRaiseGeneralProtectionFault0(pVCpu);5311 break;5312 }5313 5314 case IEMMODE_64BIT:5315 {5316 Assert(IEM_IS_64BIT_CODE(pVCpu));5317 5318 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5319 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5320 pVCpu->cpum.GstCtx.rip = uNewRip;5321 else5322 return iemRaiseGeneralProtectionFault0(pVCpu);5323 break;5324 }5325 5326 IEM_NOT_REACHED_DEFAULT_CASE_RET();5327 }5328 5329 #ifndef IEM_WITH_CODE_TLB5330 /* Flush the prefetch buffer. */5331 pVCpu->iem.s.cbOpcode = cbInstr;5332 #endif5333 5334 /*5335 * Clear RF and finish the instruction (maybe raise #DB).5336 */5337 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5338 }5339 5340 5341 /**5342 * Adds a 16-bit signed jump offset to RIP/EIP/IP.5343 *5344 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5345 * segment limit.5346 *5347 * @returns Strict VBox status code.5348 * @param pVCpu The cross context virtual CPU structure of the calling thread.5349 * @param cbInstr Instruction size.5350 * @param offNextInstr The offset of the next instruction.5351 */5352 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT5353 {5354 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);5355 5356 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;5357 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit5358 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))5359 pVCpu->cpum.GstCtx.rip = uNewIp;5360 else5361 return iemRaiseGeneralProtectionFault0(pVCpu);5362 5363 #ifndef IEM_WITH_CODE_TLB5364 /* Flush the prefetch buffer. 
*/5365 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5366 #endif5367 5368 /*5369 * Clear RF and finish the instruction (maybe raise #DB).5370 */5371 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5372 }5373 5374 5375 /**5376 * Adds a 32-bit signed jump offset to RIP/EIP/IP.5377 *5378 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code5379 * segment limit.5380 *5381 * @returns Strict VBox status code.5382 * @param pVCpu The cross context virtual CPU structure of the calling thread.5383 * @param cbInstr Instruction size.5384 * @param offNextInstr The offset of the next instruction.5385 * @param enmEffOpSize Effective operand size.5386 */5387 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,5388 IEMMODE enmEffOpSize) RT_NOEXCEPT5389 {5390 if (enmEffOpSize == IEMMODE_32BIT)5391 {5392 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));5393 5394 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;5395 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))5396 pVCpu->cpum.GstCtx.rip = uNewEip;5397 else5398 return iemRaiseGeneralProtectionFault0(pVCpu);5399 }5400 else5401 {5402 Assert(enmEffOpSize == IEMMODE_64BIT);5403 5404 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;5405 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))5406 pVCpu->cpum.GstCtx.rip = uNewRip;5407 else5408 return iemRaiseGeneralProtectionFault0(pVCpu);5409 }5410 5411 #ifndef IEM_WITH_CODE_TLB5412 /* Flush the prefetch buffer. */5413 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);5414 #endif5415 5416 /*5417 * Clear RF and finish the instruction (maybe raise #DB).5418 */5419 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);5420 }5421 5422 /** @} */5423 5424 5425 /** @name FPU access and helpers.5426 *5427 * @{5428 */5429 5430 /**5431 * Updates the x87.DS and FPUDP registers.5432 *5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.5434 * @param pFpuCtx The FPU context.5435 * @param iEffSeg The effective segment register.5436 * @param GCPtrEff The effective address relative to @a iEffSeg.5437 */5438 DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)5439 {5440 RTSEL sel;5441 switch (iEffSeg)5442 {5443 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;5444 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;5445 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;5446 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;5447 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;5448 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;5449 default:5450 AssertMsgFailed(("%d\n", iEffSeg));5451 sel = pVCpu->cpum.GstCtx.ds.Sel;5452 }5453 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */5454 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))5455 {5456 pFpuCtx->DS = 0;5457 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);5458 }5459 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. 
*/5460 {5461 pFpuCtx->DS = sel;5462 pFpuCtx->FPUDP = GCPtrEff;5463 }5464 else5465 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;5466 }5467 5468 5469 /**5470 * Rotates the stack registers in the push direction.5471 *5472 * @param pFpuCtx The FPU context.5473 * @remarks This is a complete waste of time, but fxsave stores the registers in5474 * stack order.5475 */5476 DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)5477 {5478 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;5479 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;5480 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;5481 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;5482 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;5483 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;5484 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;5485 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;5486 pFpuCtx->aRegs[0].r80 = r80Tmp;5487 }5488 5489 5490 /**5491 * Rotates the stack registers in the pop direction.5492 *5493 * @param pFpuCtx The FPU context.5494 * @remarks This is a complete waste of time, but fxsave stores the registers in5495 * stack order.5496 */5497 DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)5498 {5499 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;5500 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;5501 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;5502 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;5503 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;5504 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;5505 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;5506 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;5507 pFpuCtx->aRegs[7].r80 = r80Tmp;5508 }5509 5510 5511 /**5512 * Updates FSW and pushes a FPU result onto the FPU stack if no pending5513 * exception prevents it.5514 *5515 * @param pVCpu The cross context virtual CPU structure of the calling thread.5516 * @param pResult The FPU operation result to push.5517 * @param pFpuCtx The FPU context.5518 */5519 static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT5520 {5521 /* Update FSW and bail if there are pending exceptions afterwards. */5522 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5523 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5524 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5525 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5526 {5527 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))5528 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",5529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5530 pFpuCtx->FSW = fFsw;5531 return;5532 }5533 5534 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5535 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5536 {5537 /* All is fine, push the actual value. */5538 pFpuCtx->FTW |= RT_BIT(iNewTop);5539 pFpuCtx->aRegs[7].r80 = pResult->r80Result;5540 }5541 else if (pFpuCtx->FCW & X86_FCW_IM)5542 {5543 /* Masked stack overflow, push QNaN. */5544 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5545 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5546 }5547 else5548 {5549 /* Raise stack overflow, don't push anything. 
*/5550 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5551 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5552 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",5553 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5554 return;5555 }5556 5557 fFsw &= ~X86_FSW_TOP_MASK;5558 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5559 pFpuCtx->FSW = fFsw;5560 5561 iemFpuRotateStackPush(pFpuCtx);5562 RT_NOREF(pVCpu);5563 }5564 5565 5566 /**5567 * Stores a result in a FPU register and updates the FSW and FTW.5568 *5569 * @param pVCpu The cross context virtual CPU structure of the calling thread.5570 * @param pFpuCtx The FPU context.5571 * @param pResult The result to store.5572 * @param iStReg Which FPU register to store it in.5573 */5574 static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT5575 {5576 Assert(iStReg < 8);5577 uint16_t fNewFsw = pFpuCtx->FSW;5578 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;5579 fNewFsw &= ~X86_FSW_C_MASK;5580 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5581 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5582 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5583 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5584 pFpuCtx->FSW = fNewFsw;5585 pFpuCtx->FTW |= RT_BIT(iReg);5586 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;5587 RT_NOREF(pVCpu);5588 }5589 5590 5591 /**5592 * Only updates the FPU status word (FSW) with the result of the current5593 * instruction.5594 *5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.5596 * @param pFpuCtx The FPU context.5597 * @param u16FSW The FSW output of the current instruction.5598 */5599 static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT5600 {5601 uint16_t fNewFsw = pFpuCtx->FSW;5602 fNewFsw &= ~X86_FSW_C_MASK;5603 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;5604 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5605 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",5606 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));5607 pFpuCtx->FSW = fNewFsw;5608 RT_NOREF(pVCpu);5609 }5610 5611 5612 /**5613 * Pops one item off the FPU stack if no pending exception prevents it.5614 *5615 * @param pFpuCtx The FPU context.5616 */5617 static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT5618 {5619 /* Check pending exceptions. */5620 uint16_t uFSW = pFpuCtx->FSW;5621 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5622 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5623 return;5624 5625 /* TOP--. */5626 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;5627 uFSW &= ~X86_FSW_TOP_MASK;5628 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;5629 pFpuCtx->FSW = uFSW;5630 5631 /* Mark the previous ST0 as empty. */5632 iOldTop >>= X86_FSW_TOP_SHIFT;5633 pFpuCtx->FTW &= ~RT_BIT(iOldTop);5634 5635 /* Rotate the registers. 
*/5636 iemFpuRotateStackPop(pFpuCtx);5637 }5638 5639 5640 /**5641 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.5642 *5643 * @param pVCpu The cross context virtual CPU structure of the calling thread.5644 * @param pResult The FPU operation result to push.5645 * @param uFpuOpcode The FPU opcode value.5646 */5647 void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5648 {5649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5650 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5651 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5652 }5653 5654 5655 /**5656 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,5657 * and sets FPUDP and FPUDS.5658 *5659 * @param pVCpu The cross context virtual CPU structure of the calling thread.5660 * @param pResult The FPU operation result to push.5661 * @param iEffSeg The effective segment register.5662 * @param GCPtrEff The effective address relative to @a iEffSeg.5663 * @param uFpuOpcode The FPU opcode value.5664 */5665 void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,5666 uint16_t uFpuOpcode) RT_NOEXCEPT5667 {5668 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5669 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5670 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5671 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);5672 }5673 5674 5675 /**5676 * Replace ST0 with the first value and push the second onto the FPU stack,5677 * unless a pending exception prevents it.5678 *5679 * @param pVCpu The cross context virtual CPU structure of the calling thread.5680 * @param pResult The FPU operation result to store and push.5681 * @param uFpuOpcode The FPU opcode value.5682 */5683 void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT5684 {5685 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5686 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5687 5688 /* Update FSW and bail if there are pending exceptions afterwards. */5689 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;5690 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;5691 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))5692 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))5693 {5694 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))5695 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",5696 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));5697 pFpuCtx->FSW = fFsw;5698 return;5699 }5700 5701 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;5702 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))5703 {5704 /* All is fine, push the actual value. */5705 pFpuCtx->FTW |= RT_BIT(iNewTop);5706 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;5707 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;5708 }5709 else if (pFpuCtx->FCW & X86_FCW_IM)5710 {5711 /* Masked stack overflow, push QNaN. */5712 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;5713 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);5714 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);5715 }5716 else5717 {5718 /* Raise stack overflow, don't push anything. 
*/5719 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;5720 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;5721 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",5722 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5723 return;5724 }5725 5726 fFsw &= ~X86_FSW_TOP_MASK;5727 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;5728 pFpuCtx->FSW = fFsw;5729 5730 iemFpuRotateStackPush(pFpuCtx);5731 }5732 5733 5734 /**5735 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5736 * FOP.5737 *5738 * @param pVCpu The cross context virtual CPU structure of the calling thread.5739 * @param pResult The result to store.5740 * @param iStReg Which FPU register to store it in.5741 * @param uFpuOpcode The FPU opcode value.5742 */5743 void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5744 {5745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5746 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5747 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5748 }5749 5750 5751 /**5752 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and5753 * FOP, and then pops the stack.5754 *5755 * @param pVCpu The cross context virtual CPU structure of the calling thread.5756 * @param pResult The result to store.5757 * @param iStReg Which FPU register to store it in.5758 * @param uFpuOpcode The FPU opcode value.5759 */5760 void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5761 {5762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5763 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5764 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5765 iemFpuMaybePopOne(pFpuCtx);5766 }5767 5768 5769 /**5770 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5771 * FPUDP, and FPUDS.5772 *5773 * @param pVCpu The cross context virtual CPU structure of the calling thread.5774 * @param pResult The result to store.5775 * @param iStReg Which FPU register to store it in.5776 * @param iEffSeg The effective memory operand selector register.5777 * @param GCPtrEff The effective memory operand offset.5778 * @param uFpuOpcode The FPU opcode value.5779 */5780 void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,5781 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5782 {5783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5784 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5785 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5786 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5787 }5788 5789 5790 /**5791 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,5792 * FPUDP, and FPUDS, and then pops the stack.5793 *5794 * @param pVCpu The cross context virtual CPU structure of the calling thread.5795 * @param pResult The result to store.5796 * @param iStReg Which FPU register to store it in.5797 * @param iEffSeg The effective memory operand selector register.5798 * @param GCPtrEff The effective memory operand offset.5799 * @param uFpuOpcode The FPU opcode value.5800 */5801 void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,5802 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5803 {5804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5805 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5806 
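Editor's aside (illustrative model only): the rotate/push/pop helpers above treat the x87 TOP field as a 3-bit counter in FSW, so a push is TOP = (TOP + 7) & 7 and a pop is TOP = (TOP + 1) & 7, with the (abridged) tag word holding one valid bit per physical register. A toy model of just that bookkeeping:

#include <cstdint>

struct MiniX87
{
    uint8_t top;    /* 0..7, like X86_FSW_TOP */
    uint8_t ftw;    /* one bit per physical register, 1 = valid (abridged FTW) */
};

/* Push: decrement TOP modulo 8 and mark the new top register as valid. */
static void miniPush(MiniX87 *p)
{
    p->top  = (uint8_t)((p->top + 7) & 7);
    p->ftw |= (uint8_t)(1 << p->top);
}

/* Pop: mark the current top register empty and increment TOP modulo 8. */
static void miniPop(MiniX87 *p)
{
    p->ftw &= (uint8_t)~(1 << p->top);
    p->top  = (uint8_t)((p->top + 1) & 7);
}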
iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5807 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);5808 iemFpuMaybePopOne(pFpuCtx);5809 }5810 5811 5812 /**5813 * Updates the FOP, FPUIP, and FPUCS. For FNOP.5814 *5815 * @param pVCpu The cross context virtual CPU structure of the calling thread.5816 * @param uFpuOpcode The FPU opcode value.5817 */5818 void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5819 {5820 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5821 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5822 }5823 5824 5825 /**5826 * Updates the FSW, FOP, FPUIP, and FPUCS.5827 *5828 * @param pVCpu The cross context virtual CPU structure of the calling thread.5829 * @param u16FSW The FSW from the current instruction.5830 * @param uFpuOpcode The FPU opcode value.5831 */5832 void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5833 {5834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5835 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5836 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5837 }5838 5839 5840 /**5841 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.5842 *5843 * @param pVCpu The cross context virtual CPU structure of the calling thread.5844 * @param u16FSW The FSW from the current instruction.5845 * @param uFpuOpcode The FPU opcode value.5846 */5847 void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5848 {5849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5850 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5851 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5852 iemFpuMaybePopOne(pFpuCtx);5853 }5854 5855 5856 /**5857 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.5858 *5859 * @param pVCpu The cross context virtual CPU structure of the calling thread.5860 * @param u16FSW The FSW from the current instruction.5861 * @param iEffSeg The effective memory operand selector register.5862 * @param GCPtrEff The effective memory operand offset.5863 * @param uFpuOpcode The FPU opcode value.5864 */5865 void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5866 {5867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5869 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5870 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5871 }5872 5873 5874 /**5875 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.5876 *5877 * @param pVCpu The cross context virtual CPU structure of the calling thread.5878 * @param u16FSW The FSW from the current instruction.5879 * @param uFpuOpcode The FPU opcode value.5880 */5881 void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT5882 {5883 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5884 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5885 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5886 iemFpuMaybePopOne(pFpuCtx);5887 iemFpuMaybePopOne(pFpuCtx);5888 }5889 5890 5891 /**5892 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.5893 *5894 * @param pVCpu The cross context virtual CPU structure of the calling thread.5895 * @param u16FSW The FSW from the current instruction.5896 * @param iEffSeg The effective memory operand selector register.5897 * @param GCPtrEff The effective memory operand offset.5898 * @param uFpuOpcode The FPU opcode 
value.5899 */5900 void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,5901 uint16_t uFpuOpcode) RT_NOEXCEPT5902 {5903 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5904 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5905 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5906 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);5907 iemFpuMaybePopOne(pFpuCtx);5908 }5909 5910 5911 /**5912 * Worker routine for raising an FPU stack underflow exception.5913 *5914 * @param pVCpu The cross context virtual CPU structure of the calling thread.5915 * @param pFpuCtx The FPU context.5916 * @param iStReg The stack register being accessed.5917 */5918 static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)5919 {5920 Assert(iStReg < 8 || iStReg == UINT8_MAX);5921 if (pFpuCtx->FCW & X86_FCW_IM)5922 {5923 /* Masked underflow. */5924 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5925 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;5926 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;5927 if (iStReg != UINT8_MAX)5928 {5929 pFpuCtx->FTW |= RT_BIT(iReg);5930 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);5931 }5932 }5933 else5934 {5935 pFpuCtx->FSW &= ~X86_FSW_C_MASK;5936 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;5937 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",5938 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));5939 }5940 RT_NOREF(pVCpu);5941 }5942 5943 5944 /**5945 * Raises a FPU stack underflow exception.5946 *5947 * @param pVCpu The cross context virtual CPU structure of the calling thread.5948 * @param iStReg The destination register that should be loaded5949 * with QNaN if \#IS is not masked. Specify5950 * UINT8_MAX if none (like for fcom).5951 * @param uFpuOpcode The FPU opcode value.5952 */5953 void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5954 {5955 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5956 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5957 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5958 }5959 5960 5961 void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT5962 {5963 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5964 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5965 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5966 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5967 }5968 5969 5970 void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT5971 {5972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5973 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5974 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5975 iemFpuMaybePopOne(pFpuCtx);5976 }5977 5978 5979 void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,5980 uint16_t uFpuOpcode) RT_NOEXCEPT5981 {5982 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5983 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);5984 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5985 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);5986 iemFpuMaybePopOne(pFpuCtx);5987 }5988 5989 5990 void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT5991 {5992 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;5993 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);5994 
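Editor's aside (simplified sketch, not the emulation code itself): the underflow/overflow workers in this hunk all follow one pattern - if the fault is masked in FCW (IM set) they store a QNaN and record IE/SF in FSW, otherwise they set IE/SF plus ES/B and leave the register stack untouched so the fault is delivered later. Schematically, with the standard x87 bit positions assumed:

#include <cstdint>

static const uint16_t kFcwIM = 0x0001;                  /* invalid-operation mask */
static const uint16_t kFswIE = 0x0001, kFswSF = 0x0040;
static const uint16_t kFswES = 0x0080, kFswB  = 0x8000;

/* Returns true if a QNaN should be stored (masked case), false if the
 * exception stays pending and the stack must not be modified. */
static bool x87StackFault(uint16_t fcw, uint16_t *pFsw)
{
    if (fcw & kFcwIM)
    {
        *pFsw |= kFswIE | kFswSF;                       /* masked: flag it and continue */
        return true;
    }
    *pFsw |= kFswIE | kFswSF | kFswES | kFswB;          /* unmasked: leave the stack alone */
    return false;
}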
iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);5995 iemFpuMaybePopOne(pFpuCtx);5996 iemFpuMaybePopOne(pFpuCtx);5997 }5998 5999 6000 void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6001 {6002 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6003 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6004 6005 if (pFpuCtx->FCW & X86_FCW_IM)6006 {6007 /* Masked overflow - Push QNaN. */6008 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6009 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6010 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6011 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6012 pFpuCtx->FTW |= RT_BIT(iNewTop);6013 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6014 iemFpuRotateStackPush(pFpuCtx);6015 }6016 else6017 {6018 /* Exception pending - don't change TOP or the register stack. */6019 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6020 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6021 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",6022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6023 }6024 }6025 6026 6027 void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6028 {6029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6030 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6031 6032 if (pFpuCtx->FCW & X86_FCW_IM)6033 {6034 /* Masked overflow - Push QNaN. */6035 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6036 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6037 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;6038 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6039 pFpuCtx->FTW |= RT_BIT(iNewTop);6040 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);6041 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6042 iemFpuRotateStackPush(pFpuCtx);6043 }6044 else6045 {6046 /* Exception pending - don't change TOP or the register stack. */6047 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6048 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6049 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",6050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6051 }6052 }6053 6054 6055 /**6056 * Worker routine for raising an FPU stack overflow exception on a push.6057 *6058 * @param pVCpu The cross context virtual CPU structure of the calling thread.6059 * @param pFpuCtx The FPU context.6060 */6061 static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT6062 {6063 if (pFpuCtx->FCW & X86_FCW_IM)6064 {6065 /* Masked overflow. */6066 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;6067 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);6068 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;6069 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;6070 pFpuCtx->FTW |= RT_BIT(iNewTop);6071 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);6072 iemFpuRotateStackPush(pFpuCtx);6073 }6074 else6075 {6076 /* Exception pending - don't change TOP or the register stack. 
*/6077 pFpuCtx->FSW &= ~X86_FSW_C_MASK;6078 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;6079 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",6080 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));6081 }6082 RT_NOREF(pVCpu);6083 }6084 6085 6086 /**6087 * Raises a FPU stack overflow exception on a push.6088 *6089 * @param pVCpu The cross context virtual CPU structure of the calling thread.6090 * @param uFpuOpcode The FPU opcode value.6091 */6092 void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT6093 {6094 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6095 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6096 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6097 }6098 6099 6100 /**6101 * Raises a FPU stack overflow exception on a push with a memory operand.6102 *6103 * @param pVCpu The cross context virtual CPU structure of the calling thread.6104 * @param iEffSeg The effective memory operand selector register.6105 * @param GCPtrEff The effective memory operand offset.6106 * @param uFpuOpcode The FPU opcode value.6107 */6108 void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT6109 {6110 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;6111 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);6112 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);6113 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);6114 }6115 6116 /** @} */6117 6118 6119 /** @name Memory access.6120 *6121 * @{6122 */6123 6124 #undef LOG_GROUP6125 #define LOG_GROUP LOG_GROUP_IEM_MEM6126 6127 /**6128 * Applies the segment limit, base and attributes.6129 *6130 * This may raise a \#GP or \#SS.6131 *6132 * @returns VBox strict status code.6133 *6134 * @param pVCpu The cross context virtual CPU structure of the calling thread.6135 * @param fAccess The kind of access which is being performed.6136 * @param iSegReg The index of the segment register to apply.6137 * This is UINT8_MAX if none (for IDT, GDT, LDT,6138 * TSS, ++).6139 * @param cbMem The access size.6140 * @param pGCPtrMem Pointer to the guest memory address to apply6141 * segmentation to. Input and output parameter.6142 */6143 VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT6144 {6145 if (iSegReg == UINT8_MAX)6146 return VINF_SUCCESS;6147 6148 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));6149 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);6150 switch (IEM_GET_CPU_MODE(pVCpu))6151 {6152 case IEMMODE_16BIT:6153 case IEMMODE_32BIT:6154 {6155 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;6156 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;6157 6158 if ( pSel->Attr.n.u1Present6159 && !pSel->Attr.n.u1Unusable)6160 {6161 Assert(pSel->Attr.n.u1DescType);6162 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))6163 {6164 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)6165 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )6166 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6167 6168 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6169 {6170 /** @todo CPL check. */6171 }6172 6173 /*6174 * There are two kinds of data selectors, normal and expand down.6175 */6176 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))6177 {6178 if ( GCPtrFirst32 > pSel->u32Limit6179 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). 
*/6180 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6181 }6182 else6183 {6184 /*6185 * The upper boundary is defined by the B bit, not the G bit!6186 */6187 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)6188 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))6189 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6190 }6191 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6192 }6193 else6194 {6195 /*6196 * Code selector and usually be used to read thru, writing is6197 * only permitted in real and V8086 mode.6198 */6199 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)6200 || ( (fAccess & IEM_ACCESS_TYPE_READ)6201 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )6202 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )6203 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);6204 6205 if ( GCPtrFirst32 > pSel->u32Limit6206 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */6207 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);6208 6209 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))6210 {6211 /** @todo CPL check. */6212 }6213 6214 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;6215 }6216 }6217 else6218 return iemRaiseGeneralProtectionFault0(pVCpu);6219 return VINF_SUCCESS;6220 }6221 6222 case IEMMODE_64BIT:6223 {6224 RTGCPTR GCPtrMem = *pGCPtrMem;6225 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)6226 *pGCPtrMem = GCPtrMem + pSel->u64Base;6227 6228 Assert(cbMem >= 1);6229 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))6230 return VINF_SUCCESS;6231 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.6232 * 4.12.2 "Data Limit Checks in 64-bit Mode". */6233 return iemRaiseGeneralProtectionFault0(pVCpu);6234 }6235 6236 default:6237 AssertFailedReturn(VERR_IEM_IPE_7);6238 }6239 }6240 6241 6242 /**6243 * Translates a virtual address to a physical physical address and checks if we6244 * can access the page as specified.6245 *6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.6247 * @param GCPtrMem The virtual address.6248 * @param cbAccess The access size, for raising \#PF correctly for6249 * FXSAVE and such.6250 * @param fAccess The intended access.6251 * @param pGCPhysMem Where to return the physical address.6252 */6253 VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,6254 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT6255 {6256 /** @todo Need a different PGM interface here. We're currently using6257 * generic / REM interfaces. this won't cut it for R0. */6258 /** @todo If/when PGM handles paged real-mode, we can remove the hack in6259 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault6260 * here. 
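Editor's aside (hedged sketch, not taken from the tree): the 16/32-bit path of iemMemApplySegment above distinguishes normal and expand-down data segments - for normal segments both the first and the last byte of the access must lie within the limit, while for expand-down segments the valid window is (limit, B ? 4G-1 : 64K-1]. Compactly:

#include <cstdint>

/* Check a [first, last] byte range against a data-segment limit.
 * fExpandDown and fBig correspond to the descriptor's E and D/B attribute bits. */
static bool segLimitOk(uint32_t first, uint32_t last, uint32_t limit, bool fExpandDown, bool fBig)
{
    if (!fExpandDown)
        return first <= limit && last <= limit;
    uint32_t const upper = fBig ? UINT32_MAX : UINT32_C(0xffff);
    return first > limit && last <= upper;
}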
 */
6261 	    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6262 	    PGMPTWALKFAST WalkFast;
6263 	    AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6264 	    AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6265 	    AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6266 	    AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6267 	    uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6268 	                    | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6269 	    if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6270 	        fQPage |= PGMQPAGE_F_USER_MODE;
6271 	    int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6272 	    if (RT_SUCCESS(rc))
6273 	    {
6274 	        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6275 
6276 	        /* If the page is writable and does not have the no-exec bit set, all
6277 	           access is allowed. Otherwise we'll have to check more carefully... */
6278 	        Assert(   (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6279 	               || (   (   !(fAccess & IEM_ACCESS_TYPE_WRITE)
6280 	                       || (WalkFast.fEffective & X86_PTE_RW)
6281 	                       || (   (   IEM_GET_CPL(pVCpu) != 3
6282 	                               || (fAccess & IEM_ACCESS_WHAT_SYS))
6283 	                           && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6284 	                   && (   (WalkFast.fEffective & X86_PTE_US)
6285 	                       || IEM_GET_CPL(pVCpu) != 3
6286 	                       || (fAccess & IEM_ACCESS_WHAT_SYS) )
6287 	                   && (   !(fAccess & IEM_ACCESS_TYPE_EXEC)
6288 	                       || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6289 	                       || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6290 	                  )
6291 	              );
6292 
6293 	        /* PGMGstQueryPageFast sets the A & D bits. */
6294 	        /** @todo testcase: check when A and D bits are actually set by the CPU. */
6295 	        Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6296 
6297 	        *pGCPhysMem = WalkFast.GCPhys;
6298 	        return VINF_SUCCESS;
6299 	    }
6300 
6301 	    LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6302 	    /** @todo Check unassigned memory in unpaged mode. 
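Editor's aside (illustrative only, under the assumptions noted in the comments): the large assertion above encodes when an access passes the paging permission check - writes need a writable PTE unless the CPU runs in supervisor mode with CR0.WP clear, ring-3 accesses need the U/S bit, and instruction fetches additionally fail on NX pages when EFER.NXE is enabled. As a plain predicate:

#include <cstdint>

struct PteBits { bool fWrite, fUser, fNoExec; };    /* effective R/W, U/S, NX bits */

/* Simplified permission predicate; supervisor mode is approximated as cpl != 3. */
static bool pageAccessAllowed(PteBits pte, bool fIsWrite, bool fIsExec,
                              unsigned cpl, bool fCr0Wp, bool fEferNxe)
{
    if (fIsWrite && !pte.fWrite && (cpl == 3 || fCr0Wp))
        return false;                               /* write to a read-only page */
    if (cpl == 3 && !pte.fUser)
        return false;                               /* user access to a supervisor page */
    if (fIsExec && pte.fNoExec && fEferNxe)
        return false;                               /* fetch from a no-execute page */
    return true;
}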
*/6303 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT6304 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)6305 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);6306 #endif6307 *pGCPhysMem = NIL_RTGCPHYS;6308 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);6309 }6310 6311 #if 0 /*unused*/6312 /**6313 * Looks up a memory mapping entry.6314 *6315 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).6316 * @param pVCpu The cross context virtual CPU structure of the calling thread.6317 * @param pvMem The memory address.6318 * @param fAccess The access to.6319 */6320 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)6321 {6322 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6323 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;6324 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem6325 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6326 return 0;6327 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem6328 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6329 return 1;6330 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem6331 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)6332 return 2;6333 return VERR_NOT_FOUND;6334 }6335 #endif6336 6337 /**6338 * Finds a free memmap entry when using iNextMapping doesn't work.6339 *6340 * @returns Memory mapping index, 1024 on failure.6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.6342 */6343 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)6344 {6345 /*6346 * The easy case.6347 */6348 if (pVCpu->iem.s.cActiveMappings == 0)6349 {6350 pVCpu->iem.s.iNextMapping = 1;6351 return 0;6352 }6353 6354 /* There should be enough mappings for all instructions. 
*/6355 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);6356 6357 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)6358 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)6359 return i;6360 6361 AssertFailedReturn(1024);6362 }6363 6364 6365 /**6366 * Commits a bounce buffer that needs writing back and unmaps it.6367 *6368 * @returns Strict VBox status code.6369 * @param pVCpu The cross context virtual CPU structure of the calling thread.6370 * @param iMemMap The index of the buffer to commit.6371 * @param fPostponeFail Whether we can postpone writer failures to ring-3.6372 * Always false in ring-3, obviously.6373 */6374 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)6375 {6376 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);6377 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);6378 #ifdef IN_RING36379 Assert(!fPostponeFail);6380 RT_NOREF_PV(fPostponeFail);6381 #endif6382 6383 /*6384 * Do the writing.6385 */6386 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6387 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)6388 {6389 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;6390 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6391 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6392 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6393 {6394 /*6395 * Carefully and efficiently dealing with access handler return6396 * codes make this a little bloated.6397 */6398 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,6399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6400 pbBuf,6401 cbFirst,6402 PGMACCESSORIGIN_IEM);6403 if (rcStrict == VINF_SUCCESS)6404 {6405 if (cbSecond)6406 {6407 rcStrict = PGMPhysWrite(pVM,6408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6409 pbBuf + cbFirst,6410 cbSecond,6411 PGMACCESSORIGIN_IEM);6412 if (rcStrict == VINF_SUCCESS)6413 { /* nothing */ }6414 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6415 {6416 LogEx(LOG_GROUP_IEM,6417 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",6418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6420 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6421 }6422 #ifndef IN_RING36423 else if (fPostponeFail)6424 {6425 LogEx(LOG_GROUP_IEM,6426 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6429 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6430 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6431 return iemSetPassUpStatus(pVCpu, rcStrict);6432 }6433 #endif6434 else6435 {6436 LogEx(LOG_GROUP_IEM,6437 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6440 return rcStrict;6441 }6442 }6443 }6444 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6445 {6446 if (!cbSecond)6447 {6448 LogEx(LOG_GROUP_IEM,6449 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",6450 
pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));6451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6452 }6453 else6454 {6455 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,6456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6457 pbBuf + cbFirst,6458 cbSecond,6459 PGMACCESSORIGIN_IEM);6460 if (rcStrict2 == VINF_SUCCESS)6461 {6462 LogEx(LOG_GROUP_IEM,6463 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",6464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6466 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6467 }6468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6469 {6470 LogEx(LOG_GROUP_IEM,6471 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",6472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6474 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6476 }6477 #ifndef IN_RING36478 else if (fPostponeFail)6479 {6480 LogEx(LOG_GROUP_IEM,6481 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;6485 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6486 return iemSetPassUpStatus(pVCpu, rcStrict);6487 }6488 #endif6489 else6490 {6491 LogEx(LOG_GROUP_IEM,6492 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));6495 return rcStrict2;6496 }6497 }6498 }6499 #ifndef IN_RING36500 else if (fPostponeFail)6501 {6502 LogEx(LOG_GROUP_IEM,6503 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));6506 if (!cbSecond)6507 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;6508 else6509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;6510 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);6511 return iemSetPassUpStatus(pVCpu, rcStrict);6512 }6513 #endif6514 else6515 {6516 LogEx(LOG_GROUP_IEM,6517 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),6519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6520 return rcStrict;6521 }6522 }6523 else6524 {6525 /*6526 * No access handlers, much simpler.6527 */6528 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);6529 if (RT_SUCCESS(rc))6530 {6531 if (cbSecond)6532 {6533 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);6534 if (RT_SUCCESS(rc))6535 { /* likely 
*/ }6536 else6537 {6538 LogEx(LOG_GROUP_IEM,6539 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",6540 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,6541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));6542 return rc;6543 }6544 }6545 }6546 else6547 {6548 LogEx(LOG_GROUP_IEM,6549 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",6550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,6551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));6552 return rc;6553 }6554 }6555 }6556 6557 #if defined(IEM_LOG_MEMORY_WRITES)6558 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,6559 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));6560 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)6561 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,6562 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),6563 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));6564 6565 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;6566 g_cbIemWrote = cbWrote;6567 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));6568 #endif6569 6570 /*6571 * Free the mapping entry.6572 */6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;6574 Assert(pVCpu->iem.s.cActiveMappings != 0);6575 pVCpu->iem.s.cActiveMappings--;6576 return VINF_SUCCESS;6577 }6578 6579 6580 /**6581 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.6582 */6583 DECL_FORCE_INLINE(uint32_t)6584 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)6585 {6586 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;6587 if (fAccess & IEM_ACCESS_TYPE_WRITE)6588 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6589 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);6590 }6591 6592 6593 /**6594 * iemMemMap worker that deals with a request crossing pages.6595 */6596 static VBOXSTRICTRC6597 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,6598 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)6599 {6600 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);6601 Assert(cbMem <= GUEST_PAGE_SIZE);6602 6603 /*6604 * Do the address translations.6605 */6606 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);6607 RTGCPHYS GCPhysFirst;6608 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);6609 if (rcStrict != VINF_SUCCESS)6610 return rcStrict;6611 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));6612 6613 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;6614 RTGCPHYS GCPhysSecond;6615 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6616 cbSecondPage, fAccess, &GCPhysSecond);6617 if (rcStrict != VINF_SUCCESS)6618 return rcStrict;6619 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);6620 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? 
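iemMemBounceBufferMapCrossPage splits the access at the page boundary: the first chunk is whatever still fits in the current page, the remainder starts at the next page. A minimal stand-alone version of that arithmetic, assuming 4 KiB guest pages and using names local to the sketch:

    /* Page-split arithmetic mirroring the cross-page mapper above. */
    #include <cstdint>
    #include <cassert>

    static const uint32_t PAGE_SIZE_4K  = 0x1000;
    static const uint32_t PAGE_OFFS_MSK = PAGE_SIZE_4K - 1;

    struct CrossPageSplit
    {
        uint32_t cbFirstPage;   /* bytes that still fit in the first page   */
        uint32_t cbSecondPage;  /* bytes that spill into the following page */
        uint64_t GCPtrSecond;   /* guest address of the second page's start */
    };

    static CrossPageSplit splitCrossPageAccess(uint64_t GCPtrFirst, uint32_t cbMem)
    {
        /* Preconditions matching the caller: at most one page, and it does cross. */
        assert(cbMem <= PAGE_SIZE_4K);
        assert((GCPtrFirst & PAGE_OFFS_MSK) + cbMem > PAGE_SIZE_4K);

        CrossPageSplit Split;
        Split.cbFirstPage  = PAGE_SIZE_4K - (uint32_t)(GCPtrFirst & PAGE_OFFS_MSK);
        Split.cbSecondPage = cbMem - Split.cbFirstPage;
        /* Translate the page holding the last byte, rounded down to its start;
           this is the (GCPtrFirst + cbMem - 1) & ~GUEST_PAGE_OFFSET_MASK expression above. */
        Split.GCPtrSecond  = (GCPtrFirst + cbMem - 1) & ~(uint64_t)PAGE_OFFS_MSK;
        return Split;
    }

A 4-byte read at 0xffe, for instance, yields cbFirstPage=2, cbSecondPage=2 and GCPtrSecond=0x1000.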
*/6621 6622 PVMCC pVM = pVCpu->CTX_SUFF(pVM);6623 6624 /*6625 * Check for data breakpoints.6626 */6627 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))6628 { /* likely */ }6629 else6630 {6631 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);6632 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,6633 cbSecondPage, fAccess);6634 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);6635 if (fDataBps > 1)6636 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",6637 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));6638 }6639 6640 /*6641 * Read in the current memory content if it's a read, execute or partial6642 * write access.6643 */6644 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6645 6646 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6647 {6648 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6649 {6650 /*6651 * Must carefully deal with access handler status codes here,6652 * makes the code a bit bloated.6653 */6654 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);6655 if (rcStrict == VINF_SUCCESS)6656 {6657 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6658 if (rcStrict == VINF_SUCCESS)6659 { /*likely */ }6660 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6661 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6662 else6663 {6664 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",6665 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));6666 return rcStrict;6667 }6668 }6669 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6670 {6671 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);6672 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))6673 {6674 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);6675 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6676 }6677 else6678 {6679 LogEx(LOG_GROUP_IEM,6680 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",6681 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));6682 return rcStrict2;6683 }6684 }6685 else6686 {6687 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6688 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6689 return rcStrict;6690 }6691 }6692 else6693 {6694 /*6695 * No informational status codes here, much more straight forward.6696 */6697 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);6698 if (RT_SUCCESS(rc))6699 {6700 Assert(rc == VINF_SUCCESS);6701 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);6702 if (RT_SUCCESS(rc))6703 Assert(rc == VINF_SUCCESS);6704 else6705 {6706 LogEx(LOG_GROUP_IEM,6707 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));6708 return rc;6709 }6710 }6711 else6712 {6713 LogEx(LOG_GROUP_IEM,6714 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));6715 return rc;6716 }6717 }6718 }6719 #ifdef VBOX_STRICT6720 else6721 memset(pbBuf, 0xcc, cbMem);6722 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6723 memset(pbBuf + 
cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6724 #endif6725 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);6726 6727 /*6728 * Commit the bounce buffer entry.6729 */6730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;6732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;6733 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;6734 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;6735 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6736 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6737 pVCpu->iem.s.iNextMapping = iMemMap + 1;6738 pVCpu->iem.s.cActiveMappings++;6739 6740 *ppvMem = pbBuf;6741 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6742 return VINF_SUCCESS;6743 }6744 6745 6746 /**6747 * iemMemMap woker that deals with iemMemPageMap failures.6748 */6749 static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,6750 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)6751 {6752 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);6753 6754 /*6755 * Filter out conditions we can handle and the ones which shouldn't happen.6756 */6757 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE6758 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL6759 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)6760 {6761 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);6762 return rcMap;6763 }6764 pVCpu->iem.s.cPotentialExits++;6765 6766 /*6767 * Read in the current memory content if it's a read, execute or partial6768 * write access.6769 */6770 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];6771 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))6772 {6773 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)6774 memset(pbBuf, 0xff, cbMem);6775 else6776 {6777 int rc;6778 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))6779 {6780 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);6781 if (rcStrict == VINF_SUCCESS)6782 { /* nothing */ }6783 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))6784 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);6785 else6786 {6787 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6788 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));6789 return rcStrict;6790 }6791 }6792 else6793 {6794 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);6795 if (RT_SUCCESS(rc))6796 { /* likely */ }6797 else6798 {6799 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",6800 GCPhysFirst, rc));6801 return rc;6802 }6803 }6804 }6805 }6806 #ifdef VBOX_STRICT6807 else6808 memset(pbBuf, 0xcc, cbMem);6809 #endif6810 #ifdef VBOX_STRICT6811 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))6812 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);6813 #endif6814 6815 /*6816 * Commit the bounce buffer entry.6817 */6818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;6819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;6820 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;6821 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;6822 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;6823 
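Both bounce-buffer mappers hand back the same compact unmap cookie that the regular path further down also produces: bits 0-2 select the mapping slot, bit 3 is always set (the unmap side asserts it), and bits 4-7 carry the access-type bits so they can be cross-checked on unmap. A stand-alone encode/decode pair with the bit layout taken from the expressions above; the type constants are placeholders, not the real IEM_ACCESS_TYPE_XXX values:

    /* 8-bit unmap cookie: bits 0-2 = slot, bit 3 = set, bits 4-7 = access type. */
    #include <cstdint>
    #include <cassert>

    static const uint8_t ACCESS_TYPE_READ  = 0x1;  /* illustrative stand-in */
    static const uint8_t ACCESS_TYPE_WRITE = 0x2;  /* illustrative stand-in */
    static const uint8_t ACCESS_TYPE_MASK  = 0xf;  /* must fit in four bits */

    static uint8_t encodeUnmapInfo(unsigned iMemMap, uint8_t fAccessType)
    {
        assert(iMemMap < 8);                       /* only bits 0-2 are available */
        return (uint8_t)(iMemMap | 0x08 | ((fAccessType & ACCESS_TYPE_MASK) << 4));
    }

    static void decodeUnmapInfo(uint8_t bUnmapInfo, unsigned *piMemMap, uint8_t *pfAccessType)
    {
        assert(bUnmapInfo & 0x08);                 /* same sanity check as the unmap code */
        *piMemMap     = bUnmapInfo & 0x7;
        *pfAccessType = (uint8_t)(bUnmapInfo >> 4);
    }

For example, encodeUnmapInfo(1, ACCESS_TYPE_WRITE) yields 0x29, which decodeUnmapInfo splits back into slot 1 and the write-type bit.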
pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;6824 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;6825 pVCpu->iem.s.iNextMapping = iMemMap + 1;6826 pVCpu->iem.s.cActiveMappings++;6827 6828 *ppvMem = pbBuf;6829 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);6830 return VINF_SUCCESS;6831 }6832 6833 6834 6835 /**6836 * Maps the specified guest memory for the given kind of access.6837 *6838 * This may be using bounce buffering of the memory if it's crossing a page6839 * boundary or if there is an access handler installed for any of it. Because6840 * of lock prefix guarantees, we're in for some extra clutter when this6841 * happens.6842 *6843 * This may raise a \#GP, \#SS, \#PF or \#AC.6844 *6845 * @returns VBox strict status code.6846 *6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.6848 * @param ppvMem Where to return the pointer to the mapped memory.6849 * @param pbUnmapInfo Where to return unmap info to be passed to6850 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when6851 * done.6852 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,6853 * 8, 12, 16, 32 or 512. When used by string operations6854 * it can be up to a page.6855 * @param iSegReg The index of the segment register to use for this6856 * access. The base and limits are checked. Use UINT8_MAX6857 * to indicate that no segmentation is required (for IDT,6858 * GDT and LDT accesses).6859 * @param GCPtrMem The address of the guest memory.6860 * @param fAccess How the memory is being accessed. The6861 * IEM_ACCESS_TYPE_XXX part is used to figure out how to6862 * map the memory, while the IEM_ACCESS_WHAT_XXX part is6863 * used when raising exceptions. The IEM_ACCESS_ATOMIC and6864 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be6865 * set.6866 * @param uAlignCtl Alignment control:6867 * - Bits 15:0 is the alignment mask.6868 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,6869 * IEM_MEMMAP_F_ALIGN_SSE, and6870 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.6871 * Pass zero to skip alignment.6872 */6873 VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,6874 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT6875 {6876 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);6877 6878 /*6879 * Check the input and figure out which mapping entry to use.6880 */6881 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));6882 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 946883 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );6884 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));6885 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));6886 6887 unsigned iMemMap = pVCpu->iem.s.iNextMapping;6888 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)6889 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)6890 {6891 iMemMap = iemMemMapFindFree(pVCpu);6892 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),6893 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,6894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,6895 pVCpu->iem.s.aMemMappings[2].fAccess),6896 VERR_IEM_IPE_9);6897 }6898 6899 /*6900 * Map the memory, checking that we can actually access it. 
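The uAlignCtl parameter documented above packs two things into one dword: the low 16 bits are the alignment mask tested against the address, the high 16 bits hold the IEM_MEMMAP_F_ALIGN_XXX policy flags that decide whether a misaligned access raises #GP, #AC, or nothing special. A small sketch of building and testing such a value; only the split at bit 16 and the mask test come from the code, the flag bit positions are invented:

    /* Alignment-control dword: low 16 bits = alignment mask, high 16 = policy flags. */
    #include <cstdint>

    static const uint32_t ALIGN_F_GP  = UINT32_C(1) << 16;  /* stand-in for IEM_MEMMAP_F_ALIGN_GP  */
    static const uint32_t ALIGN_F_SSE = UINT32_C(1) << 17;  /* stand-in for IEM_MEMMAP_F_ALIGN_SSE */

    static inline uint32_t makeAlignCtl(uint32_t cbAlign /* power of two */, uint32_t fFlags)
    {
        return (cbAlign - 1) | fFlags;        /* e.g. 16-byte alignment -> mask 0x000f */
    }

    static inline bool isMisaligned(uint64_t GCPtrMem, uint32_t uAlignCtl)
    {
        /* Same test as iemMemMap: only the low 16 bits participate. */
        return (GCPtrMem & (uAlignCtl & UINT16_MAX)) != 0;
    }

An SSE-aligned 16-byte store corresponds to makeAlignCtl(16, ALIGN_F_GP | ALIGN_F_SSE), which is the combination iemMemStoreDataU128AlignedSse further down passes to iemMemMap.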
If something6901 * slightly complicated happens, fall back on bounce buffering.6902 */6903 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);6904 if (rcStrict == VINF_SUCCESS)6905 { /* likely */ }6906 else6907 return rcStrict;6908 6909 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */6910 { /* likely */ }6911 else6912 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);6913 6914 /*6915 * Alignment check.6916 */6917 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )6918 { /* likelyish */ }6919 else6920 {6921 /* Misaligned access. */6922 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)6923 {6924 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)6925 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)6926 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )6927 {6928 AssertCompile(X86_CR0_AM == X86_EFL_AC);6929 6930 if (!iemMemAreAlignmentChecksEnabled(pVCpu))6931 { /* likely */ }6932 else6933 return iemRaiseAlignmentCheckException(pVCpu);6934 }6935 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)6936 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */6937 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU6938 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as6939 * that's what FXSAVE does on a 10980xe. */6940 && iemMemAreAlignmentChecksEnabled(pVCpu))6941 return iemRaiseAlignmentCheckException(pVCpu);6942 else6943 return iemRaiseGeneralProtectionFault0(pVCpu);6944 }6945 6946 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)6947 /* If the access is atomic there are host platform alignmnet restrictions6948 we need to conform with. */6949 if ( !(fAccess & IEM_ACCESS_ATOMIC)6950 # if defined(RT_ARCH_AMD64)6951 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */6952 # elif defined(RT_ARCH_ARM64)6953 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */6954 # else6955 # error port me6956 # endif6957 )6958 { /* okay */ }6959 else6960 {6961 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));6962 pVCpu->iem.s.cMisalignedAtomics += 1;6963 return VINF_EM_EMULATE_SPLIT_LOCK;6964 }6965 #endif6966 }6967 6968 #ifdef IEM_WITH_DATA_TLB6969 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));6970 6971 /*6972 * Get the TLB entry for this page and check PT flags.6973 *6974 * We reload the TLB entry if we need to set the dirty bit (accessed6975 * should in theory always be set).6976 */6977 uint8_t *pbMem = NULL;6978 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);6979 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);6980 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);6981 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)6982 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )6983 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)6984 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )6985 {6986 # ifdef IEM_WITH_TLB_STATISTICS6987 pVCpu->iem.s.DataTlb.cTlbCoreHits++;6988 # endif6989 6990 /* If the page is either supervisor only or non-writable, we need to do6991 more careful access checks. 
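The host-side restriction enforced above comes down to one question: does the atomic access stay inside the unit the host can handle atomically? The code assumes a 64-byte cache line on x86 (split-lock detection) and a 16-byte block on ARM64 with LSE2. The predicate as a stand-alone helper:

    /* Does an access of cbMem bytes at GCPtrMem stay within one naturally aligned
       block of cbBlock bytes?  cbBlock = 64 models the x86 cache-line (split-lock)
       case, cbBlock = 16 the ARM64 LSE2 case from the check above. */
    #include <cstdint>
    #include <cstddef>

    static inline bool fitsInAlignedBlock(uint64_t GCPtrMem, size_t cbMem, uint32_t cbBlock)
    {
        return cbBlock - (uint32_t)(GCPtrMem & (cbBlock - 1)) >= cbMem;
    }

An 8-byte locked access at an address ending in 0x3c fails the 64-byte check (only 4 bytes remain in the line) and takes the VINF_EM_EMULATE_SPLIT_LOCK exit, while the same access at an address ending in 0x38 passes.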
*/6992 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))6993 {6994 /* Write to read only memory? */6995 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)6996 && (fAccess & IEM_ACCESS_TYPE_WRITE)6997 && ( ( IEM_GET_CPL(pVCpu) == 36998 && !(fAccess & IEM_ACCESS_WHAT_SYS))6999 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))7000 {7001 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7002 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7003 }7004 7005 /* Kernel memory accessed by userland? */7006 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7007 && IEM_GET_CPL(pVCpu) == 37008 && !(fAccess & IEM_ACCESS_WHAT_SYS))7009 {7010 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7011 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7012 }7013 }7014 7015 /* Look up the physical page info if necessary. */7016 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7017 # ifdef IN_RING37018 pbMem = pTlbe->pbMappingR3;7019 # else7020 pbMem = NULL;7021 # endif7022 else7023 {7024 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))7025 { /* likely */ }7026 else7027 IEMTlbInvalidateAllPhysicalSlow(pVCpu);7028 pTlbe->pbMappingR3 = NULL;7029 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7030 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7031 &pbMem, &pTlbe->fFlagsAndPhysRev);7032 AssertRCReturn(rc, rc);7033 # ifdef IN_RING37034 pTlbe->pbMappingR3 = pbMem;7035 # endif7036 }7037 }7038 else7039 {7040 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7041 7042 /* This page table walking will set A bits as required by the access while performing the walk.7043 ASSUMES these are set when the address is translated rather than on commit... */7044 /** @todo testcase: check when A bits are actually set by the CPU for code. */7045 PGMPTWALKFAST WalkFast;7046 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7047 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7048 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7049 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7050 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7051 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7052 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7053 fQPage |= PGMQPAGE_F_USER_MODE;7054 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7055 if (RT_SUCCESS(rc))7056 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7057 else7058 {7059 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7060 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7061 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7062 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7063 # endif7064 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7065 }7066 7067 uint32_t fDataBps;7068 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7069 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7070 {7071 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7072 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. 
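On a TLB hit the cached PTE bits are enough to re-derive the #PF conditions: a write to a read-only page faults for ring-3 non-system accesses and, once CR0.WP is set, for supervisor writes as well, and any ring-3 non-system access to a supervisor-only page faults. The same decision as a pure function, with inputs mirroring the flags used above:

    /* Pure-function version of the TLB-hit permission checks above. */
    struct PageAccessQuery
    {
        bool     fPtNoWrite;  /* page is read-only (cached as IEMTLBE_F_PT_NO_WRITE)    */
        bool     fPtNoUser;   /* page is supervisor-only (IEMTLBE_F_PT_NO_USER)         */
        bool     fWrite;      /* access includes IEM_ACCESS_TYPE_WRITE                  */
        bool     fSysAccess;  /* IEM_ACCESS_WHAT_SYS, i.e. implicit system access       */
        bool     fCr0Wp;      /* CR0.WP                                                 */
        unsigned uCpl;        /* current privilege level                                */
    };

    enum PageAccessVerdict { ACCESS_OK, FAULT_WRITE_PROTECT, FAULT_SUPERVISOR_PAGE };

    static PageAccessVerdict checkPageAccess(const PageAccessQuery &q)
    {
        bool const fUserMode = q.uCpl == 3 && !q.fSysAccess;
        if (q.fPtNoWrite && q.fWrite && (fUserMode || q.fCr0Wp))
            return FAULT_WRITE_PROTECT;      /* -> iemRaisePageFault, read bit cleared from fAccess */
        if (q.fPtNoUser && fUserMode)
            return FAULT_SUPERVISOR_PAGE;    /* -> iemRaisePageFault with the original fAccess      */
        return ACCESS_OK;
    }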
*/7073 {7074 pTlbe--;7075 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7076 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7077 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7078 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7079 else7080 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7081 # endif7082 }7083 else7084 {7085 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7086 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7087 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7088 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7089 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7090 else7091 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7092 # endif7093 }7094 }7095 else7096 {7097 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7098 to the page with the data access breakpoint armed on it to pass thru here. */7099 if (fDataBps > 1)7100 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7101 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7102 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7103 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7104 pTlbe->uTag = uTagNoRev;7105 }7106 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7107 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7108 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7109 pTlbe->GCPhys = GCPhysPg;7110 pTlbe->pbMappingR3 = NULL;7111 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));7112 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));7113 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)7114 || !(fAccess & IEM_ACCESS_TYPE_WRITE)7115 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7116 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)7117 || IEM_GET_CPL(pVCpu) != 37118 || (fAccess & IEM_ACCESS_WHAT_SYS));7119 7120 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7121 {7122 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7123 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7124 else7125 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7126 }7127 7128 /* Resolve the physical address. */7129 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7130 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7131 &pbMem, &pTlbe->fFlagsAndPhysRev);7132 AssertRCReturn(rc, rc);7133 # ifdef IN_RING37134 pTlbe->pbMappingR3 = pbMem;7135 # endif7136 }7137 7138 /*7139 * Check the physical page level access and mapping.7140 */7141 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))7142 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)7143 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )7144 { /* probably likely */ }7145 else7146 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,7147 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7148 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? 
VERR_PGM_PHYS_TLB_UNASSIGNED7149 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7150 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7151 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */7152 7153 if (pbMem)7154 {7155 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7156 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7157 fAccess |= IEM_ACCESS_NOT_LOCKED;7158 }7159 else7160 {7161 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7162 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7163 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7164 if (rcStrict != VINF_SUCCESS)7165 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7166 }7167 7168 void * const pvMem = pbMem;7169 7170 if (fAccess & IEM_ACCESS_TYPE_WRITE)7171 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7172 if (fAccess & IEM_ACCESS_TYPE_READ)7173 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7174 7175 #else /* !IEM_WITH_DATA_TLB */7176 7177 RTGCPHYS GCPhysFirst;7178 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7179 if (rcStrict != VINF_SUCCESS)7180 return rcStrict;7181 7182 if (fAccess & IEM_ACCESS_TYPE_WRITE)7183 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7184 if (fAccess & IEM_ACCESS_TYPE_READ)7185 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7186 7187 void *pvMem;7188 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7189 if (rcStrict != VINF_SUCCESS)7190 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7191 7192 #endif /* !IEM_WITH_DATA_TLB */7193 7194 /*7195 * Fill in the mapping table entry.7196 */7197 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7198 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7199 pVCpu->iem.s.iNextMapping = iMemMap + 1;7200 pVCpu->iem.s.cActiveMappings += 1;7201 7202 *ppvMem = pvMem;7203 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7204 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);7205 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);7206 7207 return VINF_SUCCESS;7208 }7209 7210 7211 /**7212 * Commits the guest memory if bounce buffered and unmaps it.7213 *7214 * @returns Strict VBox status code.7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.7216 * @param bUnmapInfo Unmap info set by iemMemMap.7217 */7218 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7219 {7220 uintptr_t const iMemMap = bUnmapInfo & 0x7;7221 AssertMsgReturn( (bUnmapInfo & 0x08)7222 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7223 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),7224 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7225 VERR_NOT_FOUND);7226 7227 /* If it's bounce buffered, we may need to write back the buffer. 
*/7228 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7229 {7230 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7231 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7232 }7233 /* Otherwise unlock it. */7234 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7235 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7236 7237 /* Free the entry. */7238 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7239 Assert(pVCpu->iem.s.cActiveMappings != 0);7240 pVCpu->iem.s.cActiveMappings--;7241 return VINF_SUCCESS;7242 }7243 7244 7245 /**7246 * Rolls back the guest memory (conceptually only) and unmaps it.7247 *7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.7249 * @param bUnmapInfo Unmap info set by iemMemMap.7250 */7251 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7252 {7253 uintptr_t const iMemMap = bUnmapInfo & 0x7;7254 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7255 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7256 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7257 == ((unsigned)bUnmapInfo >> 4),7258 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7259 7260 /* Unlock it if necessary. */7261 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7262 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7263 7264 /* Free the entry. */7265 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7266 Assert(pVCpu->iem.s.cActiveMappings != 0);7267 pVCpu->iem.s.cActiveMappings--;7268 }7269 7270 #ifdef IEM_WITH_SETJMP7271 7272 /**7273 * Maps the specified guest memory for the given kind of access, longjmp on7274 * error.7275 *7276 * This may be using bounce buffering of the memory if it's crossing a page7277 * boundary or if there is an access handler installed for any of it. Because7278 * of lock prefix guarantees, we're in for some extra clutter when this7279 * happens.7280 *7281 * This may raise a \#GP, \#SS, \#PF or \#AC.7282 *7283 * @returns Pointer to the mapped memory.7284 *7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.7286 * @param bUnmapInfo Where to return unmap info to be passed to7287 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,7288 * iemMemCommitAndUnmapWoSafeJmp,7289 * iemMemCommitAndUnmapRoSafeJmp,7290 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap7291 * when done.7292 * @param cbMem The number of bytes to map. This is usually 1,7293 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by7294 * string operations it can be up to a page.7295 * @param iSegReg The index of the segment register to use for7296 * this access. The base and limits are checked.7297 * Use UINT8_MAX to indicate that no segmentation7298 * is required (for IDT, GDT and LDT accesses).7299 * @param GCPtrMem The address of the guest memory.7300 * @param fAccess How the memory is being accessed. The7301 * IEM_ACCESS_TYPE_XXX part is used to figure out how to7302 * map the memory, while the IEM_ACCESS_WHAT_XXX part is7303 * used when raising exceptions. 
The IEM_ACCESS_ATOMIC and7304 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be7305 * set.7306 * @param uAlignCtl Alignment control:7307 * - Bits 15:0 is the alignment mask.7308 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,7309 * IEM_MEMMAP_F_ALIGN_SSE, and7310 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.7311 * Pass zero to skip alignment.7312 * @tparam a_fSafe Whether this is a call from "safe" fallback function in7313 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that7314 * needs counting as such in the statistics.7315 */7316 template<bool a_fSafeCall = false>7317 static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7318 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7319 {7320 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);7321 7322 /*7323 * Check the input, check segment access and adjust address7324 * with segment base.7325 */7326 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */7327 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));7328 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));7329 7330 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);7331 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7332 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7333 7334 /*7335 * Alignment check.7336 */7337 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )7338 { /* likelyish */ }7339 else7340 {7341 /* Misaligned access. */7342 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7343 {7344 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)7345 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)7346 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )7347 {7348 AssertCompile(X86_CR0_AM == X86_EFL_AC);7349 7350 if (iemMemAreAlignmentChecksEnabled(pVCpu))7351 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7352 }7353 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)7354 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */7355 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU7356 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as7357 * that's what FXSAVE does on a 10980xe. */7358 && iemMemAreAlignmentChecksEnabled(pVCpu))7359 iemRaiseAlignmentCheckExceptionJmp(pVCpu);7360 else7361 iemRaiseGeneralProtectionFault0Jmp(pVCpu);7362 }7363 7364 #if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)7365 /* If the access is atomic there are host platform alignmnet restrictions7366 we need to conform with. */7367 if ( !(fAccess & IEM_ACCESS_ATOMIC)7368 # if defined(RT_ARCH_AMD64)7369 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */7370 # elif defined(RT_ARCH_ARM64)7371 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. 
*/7372 # else7373 # error port me7374 # endif7375 )7376 { /* okay */ }7377 else7378 {7379 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));7380 pVCpu->iem.s.cMisalignedAtomics += 1;7381 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);7382 }7383 #endif7384 }7385 7386 /*7387 * Figure out which mapping entry to use.7388 */7389 unsigned iMemMap = pVCpu->iem.s.iNextMapping;7390 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7391 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)7392 {7393 iMemMap = iemMemMapFindFree(pVCpu);7394 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),7395 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,7396 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,7397 pVCpu->iem.s.aMemMappings[2].fAccess),7398 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));7399 }7400 7401 /*7402 * Crossing a page boundary?7403 */7404 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)7405 { /* No (likely). */ }7406 else7407 {7408 void *pvMem;7409 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);7410 if (rcStrict == VINF_SUCCESS)7411 return pvMem;7412 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7413 }7414 7415 #ifdef IEM_WITH_DATA_TLB7416 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));7417 7418 /*7419 * Get the TLB entry for this page checking that it has the A & D bits7420 * set as per fAccess flags.7421 */7422 /** @todo make the caller pass these in with fAccess. */7423 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 37424 ? IEMTLBE_F_PT_NO_USER : 0;7425 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE7426 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY7427 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)7428 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)7429 ? IEMTLBE_F_PT_NO_WRITE : 0)7430 : 0;7431 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;7432 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);7433 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);7434 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);7435 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)7436 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )7437 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)7438 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )7439 {7440 # ifdef IEM_WITH_TLB_STATISTICS7441 if (a_fSafeCall)7442 pVCpu->iem.s.DataTlb.cTlbSafeHits++;7443 else7444 pVCpu->iem.s.DataTlb.cTlbCoreHits++;7445 # endif7446 }7447 else7448 {7449 if (a_fSafeCall)7450 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;7451 else7452 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;7453 7454 /* This page table walking will set A and D bits as required by the7455 access while performing the walk.7456 ASSUMES these are set when the address is translated rather than on commit... */7457 /** @todo testcase: check when A and D bits are actually set by the CPU. 
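The fNoUser, fNoWriteNoDirty and fNoRead masks computed above exist so the common case can be decided with a single AND-plus-compare: every condition that would force the slow path (stale physical revision, missing accessed/dirty bit, forbidden read or write, user/supervisor mismatch) is folded into one bit mask, and the entry qualifies only if masking the cached flags yields exactly the current physical revision. A reduced model of that trick; the bit assignments and the assumption that the revision value occupies only the high bits are invented for the sketch, the folding technique itself is what the code above implements:

    /* One-branch TLB qualification: fold all disqualifying bits for this access
       into a single mask and compare against the expected revision value. */
    #include <cstdint>

    static const uint64_t F_PT_NO_WRITE    = UINT64_C(1) << 0;
    static const uint64_t F_PT_NO_USER     = UINT64_C(1) << 1;
    static const uint64_t F_PG_NO_READ     = UINT64_C(1) << 2;
    static const uint64_t F_PT_NO_ACCESSED = UINT64_C(1) << 3;
    static const uint64_t F_PT_NO_DIRTY    = UINT64_C(1) << 4;
    static const uint64_t F_PHYS_REV_MASK  = ~UINT64_C(0) << 10; /* revision lives in the high bits (assumed) */

    static inline bool tlbEntryUsableForAccess(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev,
                                               bool fWrite, bool fRead, bool fUserMode)
    {
        uint64_t fDisqualify = F_PHYS_REV_MASK | F_PT_NO_ACCESSED;
        if (fWrite)
            fDisqualify |= F_PT_NO_WRITE | F_PT_NO_DIRTY;
        if (fRead)
            fDisqualify |= F_PG_NO_READ;
        if (fUserMode)
            fDisqualify |= F_PT_NO_USER;
        /* The entry is good only if none of the relevant "no" bits are set and the
           physical revision matches, both established by this single compare. */
        return (fFlagsAndPhysRev & fDisqualify) == uTlbPhysRev;
    }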
*/7458 PGMPTWALKFAST WalkFast;7459 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);7460 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);7461 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);7462 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);7463 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))7464 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);7465 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))7466 fQPage |= PGMQPAGE_F_USER_MODE;7467 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);7468 if (RT_SUCCESS(rc))7469 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);7470 else7471 {7472 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));7473 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7474 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)7475 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);7476 # endif7477 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);7478 }7479 7480 uint32_t fDataBps;7481 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))7482 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))7483 {7484 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)7485 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */7486 {7487 pTlbe--;7488 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;7489 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7490 iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7491 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7492 else7493 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));7494 # endif7495 }7496 else7497 {7498 if (a_fSafeCall)7499 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;7500 else7501 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;7502 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;7503 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)7504 iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));7505 # ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP7506 else7507 ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);7508 # endif7509 }7510 }7511 else7512 {7513 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses7514 to the page with the data access breakpoint armed on it to pass thru here. 
*/7515 if (fDataBps > 1)7516 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",7517 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));7518 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);7519 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;7520 pTlbe->uTag = uTagNoRev;7521 }7522 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)7523 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);7524 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;7525 pTlbe->GCPhys = GCPhysPg;7526 pTlbe->pbMappingR3 = NULL;7527 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7528 Assert( !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7529 || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);7530 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));7531 7532 if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)7533 {7534 if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))7535 IEMTLBTRACE_LOAD( pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7536 else7537 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);7538 }7539 7540 /* Resolve the physical address. */7541 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));7542 uint8_t *pbMemFullLoad = NULL;7543 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7544 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);7545 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7546 # ifdef IN_RING37547 pTlbe->pbMappingR3 = pbMemFullLoad;7548 # endif7549 }7550 7551 /*7552 * Check the flags and physical revision.7553 * Note! This will revalidate the uTlbPhysRev after a full load. This is7554 * just to keep the code structure simple (i.e. avoid gotos or similar).7555 */7556 uint8_t *pbMem;7557 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))7558 == pVCpu->iem.s.DataTlb.uTlbPhysRev)7559 # ifdef IN_RING37560 pbMem = pTlbe->pbMappingR3;7561 # else7562 pbMem = NULL;7563 # endif7564 else7565 {7566 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));7567 7568 /*7569 * Okay, something isn't quite right or needs refreshing.7570 */7571 /* Write to read only memory? */7572 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)7573 {7574 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));7575 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7576 /** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether7577 * to trigger an \#PG or a VM nested paging exit here yet! */7578 if (Walk.fFailed & PGM_WALKFAIL_EPT)7579 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7580 # endif7581 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);7582 }7583 7584 /* Kernel memory accessed by userland? 
*/7585 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)7586 {7587 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));7588 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT7589 /** @todo TLB: See above. */7590 if (Walk.fFailed & PGM_WALKFAIL_EPT)7591 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);7592 # endif7593 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);7594 }7595 7596 /*7597 * Check if the physical page info needs updating.7598 */7599 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)7600 # ifdef IN_RING37601 pbMem = pTlbe->pbMappingR3;7602 # else7603 pbMem = NULL;7604 # endif7605 else7606 {7607 pTlbe->pbMappingR3 = NULL;7608 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;7609 pbMem = NULL;7610 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,7611 &pbMem, &pTlbe->fFlagsAndPhysRev);7612 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));7613 # ifdef IN_RING37614 pTlbe->pbMappingR3 = pbMem;7615 # endif7616 }7617 7618 /*7619 * Check the physical page level access and mapping.7620 */7621 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))7622 { /* probably likely */ }7623 else7624 {7625 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,7626 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,7627 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED7628 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL7629 : VERR_PGM_PHYS_TLB_CATCH_WRITE);7630 if (rcStrict == VINF_SUCCESS)7631 return pbMem;7632 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7633 }7634 }7635 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. 
*/7636 7637 if (pbMem)7638 {7639 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));7640 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7641 fAccess |= IEM_ACCESS_NOT_LOCKED;7642 }7643 else7644 {7645 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));7646 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);7647 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7648 if (rcStrict == VINF_SUCCESS)7649 {7650 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7651 return pbMem;7652 }7653 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7654 }7655 7656 void * const pvMem = pbMem;7657 7658 if (fAccess & IEM_ACCESS_TYPE_WRITE)7659 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7660 if (fAccess & IEM_ACCESS_TYPE_READ)7661 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));7662 7663 #else /* !IEM_WITH_DATA_TLB */7664 7665 7666 RTGCPHYS GCPhysFirst;7667 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);7668 if (rcStrict == VINF_SUCCESS) { /*likely*/ }7669 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7670 7671 if (fAccess & IEM_ACCESS_TYPE_WRITE)7672 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7673 if (fAccess & IEM_ACCESS_TYPE_READ)7674 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));7675 7676 void *pvMem;7677 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7678 if (rcStrict == VINF_SUCCESS)7679 { /* likely */ }7680 else7681 {7682 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);7683 if (rcStrict == VINF_SUCCESS)7684 return pvMem;7685 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7686 }7687 7688 #endif /* !IEM_WITH_DATA_TLB */7689 7690 /*7691 * Fill in the mapping table entry.7692 */7693 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;7694 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;7695 pVCpu->iem.s.iNextMapping = iMemMap + 1;7696 pVCpu->iem.s.cActiveMappings++;7697 7698 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);7699 return pvMem;7700 }7701 7702 7703 /** @see iemMemMapJmp */7704 static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,7705 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP7706 {7707 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);7708 }7709 7710 7711 /**7712 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.7713 *7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.7715 * @param pvMem The mapping.7716 * @param fAccess The kind of access.7717 */7718 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7719 {7720 uintptr_t const iMemMap = bUnmapInfo & 0x7;7721 AssertMsgReturnVoid( (bUnmapInfo & 0x08)7722 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7723 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7724 == ((unsigned)bUnmapInfo >> 4),7725 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));7726 7727 /* If it's bounce buffered, we may need to write back the buffer. 
*/7728 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7729 {7730 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7731 {7732 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);7733 if (rcStrict == VINF_SUCCESS)7734 return;7735 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));7736 }7737 }7738 /* Otherwise unlock it. */7739 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7740 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7741 7742 /* Free the entry. */7743 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7744 Assert(pVCpu->iem.s.cActiveMappings != 0);7745 pVCpu->iem.s.cActiveMappings--;7746 }7747 7748 7749 /** Fallback for iemMemCommitAndUnmapRwJmp. */7750 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7751 {7752 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7753 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7754 }7755 7756 7757 /** Fallback for iemMemCommitAndUnmapAtJmp. */7758 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7759 {7760 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));7761 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7762 }7763 7764 7765 /** Fallback for iemMemCommitAndUnmapWoJmp. */7766 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7767 {7768 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7769 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7770 }7771 7772 7773 /** Fallback for iemMemCommitAndUnmapRoJmp. */7774 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP7775 {7776 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);7777 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);7778 }7779 7780 7781 /** Fallback for iemMemRollbackAndUnmapWo. */7782 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7783 {7784 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);7785 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);7786 }7787 7788 #endif /* IEM_WITH_SETJMP */7789 7790 #ifndef IN_RING37791 /**7792 * Commits the guest memory if bounce buffered and unmaps it, if any bounce7793 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).7794 *7795 * Allows the instruction to be completed and retired, while the IEM user will7796 * return to ring-3 immediately afterwards and do the postponed writes there.7797 *7798 * @returns VBox status code (no strict statuses). 
Caller must check7799 * VMCPU_FF_IEM before repeating string instructions and similar stuff.7800 * @param pVCpu The cross context virtual CPU structure of the calling thread.7801 * @param pvMem The mapping.7802 * @param fAccess The kind of access.7803 */7804 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT7805 {7806 uintptr_t const iMemMap = bUnmapInfo & 0x7;7807 AssertMsgReturn( (bUnmapInfo & 0x08)7808 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)7809 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))7810 == ((unsigned)bUnmapInfo >> 4),7811 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),7812 VERR_NOT_FOUND);7813 7814 /* If it's bounce buffered, we may need to write back the buffer. */7815 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)7816 {7817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)7818 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);7819 }7820 /* Otherwise unlock it. */7821 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))7822 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7823 7824 /* Free the entry. */7825 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7826 Assert(pVCpu->iem.s.cActiveMappings != 0);7827 pVCpu->iem.s.cActiveMappings--;7828 return VINF_SUCCESS;7829 }7830 #endif7831 7832 7833 /**7834 * Rollbacks mappings, releasing page locks and such.7835 *7836 * The caller shall only call this after checking cActiveMappings.7837 *7838 * @param pVCpu The cross context virtual CPU structure of the calling thread.7839 */7840 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT7841 {7842 Assert(pVCpu->iem.s.cActiveMappings > 0);7843 7844 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);7845 while (iMemMap-- > 0)7846 {7847 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;7848 if (fAccess != IEM_ACCESS_INVALID)7849 {7850 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));7851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;7852 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))7853 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);7854 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,7855 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",7856 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,7857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));7858 pVCpu->iem.s.cActiveMappings--;7859 }7860 }7861 }7862 7863 7864 /*7865 * Instantiate R/W templates.7866 */7867 #define TMPL_MEM_WITH_STACK7868 7869 #define TMPL_MEM_TYPE uint8_t7870 #define TMPL_MEM_FN_SUFF U87871 #define TMPL_MEM_FMT_TYPE "%#04x"7872 #define TMPL_MEM_FMT_DESC "byte"7873 #include "IEMAllMemRWTmpl.cpp.h"7874 7875 #define TMPL_MEM_TYPE uint16_t7876 #define TMPL_MEM_FN_SUFF U167877 #define TMPL_MEM_FMT_TYPE "%#06x"7878 #define TMPL_MEM_FMT_DESC "word"7879 #include "IEMAllMemRWTmpl.cpp.h"7880 7881 #define TMPL_WITH_PUSH_SREG7882 #define TMPL_MEM_TYPE uint32_t7883 #define TMPL_MEM_FN_SUFF U327884 #define TMPL_MEM_FMT_TYPE "%#010x"7885 #define TMPL_MEM_FMT_DESC "dword"7886 #include "IEMAllMemRWTmpl.cpp.h"7887 #undef TMPL_WITH_PUSH_SREG7888 7889 #define TMPL_MEM_TYPE uint64_t7890 #define TMPL_MEM_FN_SUFF U647891 #define 
TMPL_MEM_FMT_TYPE "%#018RX64"7892 #define TMPL_MEM_FMT_DESC "qword"7893 #include "IEMAllMemRWTmpl.cpp.h"7894 7895 #undef TMPL_MEM_WITH_STACK7896 7897 #define TMPL_MEM_TYPE uint32_t7898 #define TMPL_MEM_TYPE_ALIGN 07899 #define TMPL_MEM_FN_SUFF U32NoAc7900 #define TMPL_MEM_FMT_TYPE "%#010x"7901 #define TMPL_MEM_FMT_DESC "dword"7902 #include "IEMAllMemRWTmpl.cpp.h"7903 #undef TMPL_WITH_PUSH_SREG7904 7905 #define TMPL_MEM_TYPE uint64_t7906 #define TMPL_MEM_TYPE_ALIGN 07907 #define TMPL_MEM_FN_SUFF U64NoAc7908 #define TMPL_MEM_FMT_TYPE "%#018RX64"7909 #define TMPL_MEM_FMT_DESC "qword"7910 #include "IEMAllMemRWTmpl.cpp.h"7911 7912 #define TMPL_MEM_TYPE uint64_t7913 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)7914 #define TMPL_MEM_FN_SUFF U64AlignedU1287915 #define TMPL_MEM_FMT_TYPE "%#018RX64"7916 #define TMPL_MEM_FMT_DESC "qword"7917 #include "IEMAllMemRWTmpl.cpp.h"7918 7919 /* See IEMAllMemRWTmplInline.cpp.h */7920 #define TMPL_MEM_BY_REF7921 7922 #define TMPL_MEM_TYPE RTFLOAT80U7923 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)7924 #define TMPL_MEM_FN_SUFF R807925 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7926 #define TMPL_MEM_FMT_DESC "tword"7927 #include "IEMAllMemRWTmpl.cpp.h"7928 7929 #define TMPL_MEM_TYPE RTPBCD80U7930 #define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */7931 #define TMPL_MEM_FN_SUFF D807932 #define TMPL_MEM_FMT_TYPE "%.10Rhxs"7933 #define TMPL_MEM_FMT_DESC "tword"7934 #include "IEMAllMemRWTmpl.cpp.h"7935 7936 #define TMPL_MEM_TYPE RTUINT128U7937 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7938 #define TMPL_MEM_FN_SUFF U1287939 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7940 #define TMPL_MEM_FMT_DESC "dqword"7941 #include "IEMAllMemRWTmpl.cpp.h"7942 7943 #define TMPL_MEM_TYPE RTUINT128U7944 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)7945 #define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)7946 #define TMPL_MEM_FN_SUFF U128AlignedSse7947 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7948 #define TMPL_MEM_FMT_DESC "dqword"7949 #include "IEMAllMemRWTmpl.cpp.h"7950 7951 #define TMPL_MEM_TYPE RTUINT128U7952 #define TMPL_MEM_TYPE_ALIGN 07953 #define TMPL_MEM_FN_SUFF U128NoAc7954 #define TMPL_MEM_FMT_TYPE "%.16Rhxs"7955 #define TMPL_MEM_FMT_DESC "dqword"7956 #include "IEMAllMemRWTmpl.cpp.h"7957 7958 #define TMPL_MEM_TYPE RTUINT256U7959 #define TMPL_MEM_TYPE_ALIGN 07960 #define TMPL_MEM_FN_SUFF U256NoAc7961 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7962 #define TMPL_MEM_FMT_DESC "qqword"7963 #include "IEMAllMemRWTmpl.cpp.h"7964 7965 #define TMPL_MEM_TYPE RTUINT256U7966 #define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)7967 #define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP7968 #define TMPL_MEM_FN_SUFF U256AlignedAvx7969 #define TMPL_MEM_FMT_TYPE "%.32Rhxs"7970 #define TMPL_MEM_FMT_DESC "qqword"7971 #include "IEMAllMemRWTmpl.cpp.h"7972 7973 /**7974 * Fetches a data dword and zero extends it to a qword.7975 *7976 * @returns Strict VBox status code.7977 * @param pVCpu The cross context virtual CPU structure of the calling thread.7978 * @param pu64Dst Where to return the qword.7979 * @param iSegReg The index of the segment register to use for7980 * this access. The base and limits are checked.7981 * @param GCPtrMem The address of the guest memory.7982 */7983 VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT7984 {7985 /* The lazy approach for now... 
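The block of TMPL_MEM_* defines above stamps out one fetch/store family per memory type by re-including a template source file with different macro settings. The same include-template technique in miniature, split over two sketch files; the file and macro names are invented, only the mechanism mirrors IEMAllMemRWTmpl.cpp.h:

    /* ---- FetchTmpl.inc.h (invented name) ----
       Expects TMPL_TYPE and TMPL_NAME from the includer, emits one fetch
       function, then cleans up after itself so it can be included again. */
    static TMPL_TYPE TMPL_NAME(const uint8_t *pbMem)
    {
        TMPL_TYPE uValue;
        memcpy(&uValue, pbMem, sizeof(uValue));   /* unaligned-safe fetch */
        return uValue;
    }
    #undef TMPL_TYPE
    #undef TMPL_NAME

    /* ---- includer ----  One function per "instantiation". */
    #include <cstdint>
    #include <cstring>

    #define TMPL_TYPE uint16_t
    #define TMPL_NAME fetchU16
    #include "FetchTmpl.inc.h"

    #define TMPL_TYPE uint32_t
    #define TMPL_NAME fetchU32
    #include "FetchTmpl.inc.h"

    #define TMPL_TYPE uint64_t
    #define TMPL_NAME fetchU64
    #include "FetchTmpl.inc.h"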
*/7986 uint8_t bUnmapInfo;7987 uint32_t const *pu32Src;7988 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,7989 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);7990 if (rc == VINF_SUCCESS)7991 {7992 *pu64Dst = *pu32Src;7993 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);7994 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));7995 }7996 return rc;7997 }7998 7999 8000 #ifdef SOME_UNUSED_FUNCTION8001 /**8002 * Fetches a data dword and sign extends it to a qword.8003 *8004 * @returns Strict VBox status code.8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.8006 * @param pu64Dst Where to return the sign extended value.8007 * @param iSegReg The index of the segment register to use for8008 * this access. The base and limits are checked.8009 * @param GCPtrMem The address of the guest memory.8010 */8011 VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8012 {8013 /* The lazy approach for now... */8014 uint8_t bUnmapInfo;8015 int32_t const *pi32Src;8016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,8017 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);8018 if (rc == VINF_SUCCESS)8019 {8020 *pu64Dst = *pi32Src;8021 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8022 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));8023 }8024 #ifdef __GNUC__ /* warning: GCC may be a royal pain */8025 else8026 *pu64Dst = 0;8027 #endif8028 return rc;8029 }8030 #endif8031 8032 8033 /**8034 * Fetches a descriptor register (lgdt, lidt).8035 *8036 * @returns Strict VBox status code.8037 * @param pVCpu The cross context virtual CPU structure of the calling thread.8038 * @param pcbLimit Where to return the limit.8039 * @param pGCPtrBase Where to return the base.8040 * @param iSegReg The index of the segment register to use for8041 * this access. The base and limits are checked.8042 * @param GCPtrMem The address of the guest memory.8043 * @param enmOpSize The effective operand size.8044 */8045 VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,8046 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT8047 {8048 /*8049 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a8050 * little special:8051 * - The two reads are done separately.8052 * - Operand size override works in 16-bit and 32-bit code, but 64-bit.8053 * - We suspect the 386 to actually commit the limit before the base in8054 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). 
We8055 * don't try emulate this eccentric behavior, because it's not well8056 * enough understood and rather hard to trigger.8057 * - The 486 seems to do a dword limit read when the operand size is 32-bit.8058 */8059 VBOXSTRICTRC rcStrict;8060 if (IEM_IS_64BIT_CODE(pVCpu))8061 {8062 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8063 if (rcStrict == VINF_SUCCESS)8064 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);8065 }8066 else8067 {8068 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */8069 if (enmOpSize == IEMMODE_32BIT)8070 {8071 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)8072 {8073 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8074 if (rcStrict == VINF_SUCCESS)8075 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8076 }8077 else8078 {8079 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);8080 if (rcStrict == VINF_SUCCESS)8081 {8082 *pcbLimit = (uint16_t)uTmp;8083 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8084 }8085 }8086 if (rcStrict == VINF_SUCCESS)8087 *pGCPtrBase = uTmp;8088 }8089 else8090 {8091 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);8092 if (rcStrict == VINF_SUCCESS)8093 {8094 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);8095 if (rcStrict == VINF_SUCCESS)8096 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);8097 }8098 }8099 }8100 return rcStrict;8101 }8102 8103 8104 /**8105 * Stores a data dqword, SSE aligned.8106 *8107 * @returns Strict VBox status code.8108 * @param pVCpu The cross context virtual CPU structure of the calling thread.8109 * @param iSegReg The index of the segment register to use for8110 * this access. The base and limits are checked.8111 * @param GCPtrMem The address of the guest memory.8112 * @param u128Value The value to store.8113 */8114 VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT8115 {8116 /* The lazy approach for now... */8117 uint8_t bUnmapInfo;8118 PRTUINT128U pu128Dst;8119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8120 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8121 if (rc == VINF_SUCCESS)8122 {8123 pu128Dst->au64[0] = u128Value.au64[0];8124 pu128Dst->au64[1] = u128Value.au64[1];8125 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8126 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8127 }8128 return rc;8129 }8130 8131 8132 #ifdef IEM_WITH_SETJMP8133 /**8134 * Stores a data dqword, SSE aligned.8135 *8136 * @returns Strict VBox status code.8137 * @param pVCpu The cross context virtual CPU structure of the calling thread.8138 * @param iSegReg The index of the segment register to use for8139 * this access. The base and limits are checked.8140 * @param GCPtrMem The address of the guest memory.8141 * @param u128Value The value to store.8142 */8143 void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,8144 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP8145 {8146 /* The lazy approach for now... 
*/8147 uint8_t bUnmapInfo;8148 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,8149 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);8150 pu128Dst->au64[0] = u128Value.au64[0];8151 pu128Dst->au64[1] = u128Value.au64[1];8152 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8153 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));8154 }8155 #endif8156 8157 8158 /**8159 * Stores a data dqword.8160 *8161 * @returns Strict VBox status code.8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.8163 * @param iSegReg The index of the segment register to use for8164 * this access. The base and limits are checked.8165 * @param GCPtrMem The address of the guest memory.8166 * @param pu256Value Pointer to the value to store.8167 */8168 VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT8169 {8170 /* The lazy approach for now... */8171 uint8_t bUnmapInfo;8172 PRTUINT256U pu256Dst;8173 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8174 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8175 if (rc == VINF_SUCCESS)8176 {8177 pu256Dst->au64[0] = pu256Value->au64[0];8178 pu256Dst->au64[1] = pu256Value->au64[1];8179 pu256Dst->au64[2] = pu256Value->au64[2];8180 pu256Dst->au64[3] = pu256Value->au64[3];8181 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8182 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8183 }8184 return rc;8185 }8186 8187 8188 #ifdef IEM_WITH_SETJMP8189 /**8190 * Stores a data dqword, longjmp on error.8191 *8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.8193 * @param iSegReg The index of the segment register to use for8194 * this access. The base and limits are checked.8195 * @param GCPtrMem The address of the guest memory.8196 * @param pu256Value Pointer to the value to store.8197 */8198 void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP8199 {8200 /* The lazy approach for now... */8201 uint8_t bUnmapInfo;8202 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,8203 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);8204 pu256Dst->au64[0] = pu256Value->au64[0];8205 pu256Dst->au64[1] = pu256Value->au64[1];8206 pu256Dst->au64[2] = pu256Value->au64[2];8207 pu256Dst->au64[3] = pu256Value->au64[3];8208 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);8209 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));8210 }8211 #endif8212 8213 8214 /**8215 * Stores a descriptor register (sgdt, sidt).8216 *8217 * @returns Strict VBox status code.8218 * @param pVCpu The cross context virtual CPU structure of the calling thread.8219 * @param cbLimit The limit.8220 * @param GCPtrBase The base address.8221 * @param iSegReg The index of the segment register to use for8222 * this access. The base and limits are checked.8223 * @param GCPtrMem The address of the guest memory.8224 */8225 VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT8226 {8227 /*8228 * The SIDT and SGDT instructions actually stores the data using two8229 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). 
The instructions8230 * does not respond to opsize prefixes.8231 */8232 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);8233 if (rcStrict == VINF_SUCCESS)8234 {8235 if (IEM_IS_16BIT_CODE(pVCpu))8236 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,8237 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_2868238 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);8239 else if (IEM_IS_32BIT_CODE(pVCpu))8240 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);8241 else8242 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);8243 }8244 return rcStrict;8245 }8246 8247 8248 /**8249 * Begin a special stack push (used by interrupt, exceptions and such).8250 *8251 * This will raise \#SS or \#PF if appropriate.8252 *8253 * @returns Strict VBox status code.8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.8255 * @param cbMem The number of bytes to push onto the stack.8256 * @param cbAlign The alignment mask (7, 3, 1).8257 * @param ppvMem Where to return the pointer to the stack memory.8258 * As with the other memory functions this could be8259 * direct access or bounce buffered access, so8260 * don't commit register until the commit call8261 * succeeds.8262 * @param pbUnmapInfo Where to store unmap info for8263 * iemMemStackPushCommitSpecial.8264 * @param puNewRsp Where to return the new RSP value. This must be8265 * passed unchanged to8266 * iemMemStackPushCommitSpecial().8267 */8268 VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,8269 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT8270 {8271 Assert(cbMem < UINT8_MAX);8272 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);8273 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);8274 }8275 8276 8277 /**8278 * Commits a special stack push (started by iemMemStackPushBeginSpecial).8279 *8280 * This will update the rSP.8281 *8282 * @returns Strict VBox status code.8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.8284 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.8285 * @param uNewRsp The new RSP value returned by8286 * iemMemStackPushBeginSpecial().8287 */8288 VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT8289 {8290 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8291 if (rcStrict == VINF_SUCCESS)8292 pVCpu->cpum.GstCtx.rsp = uNewRsp;8293 return rcStrict;8294 }8295 8296 8297 /**8298 * Begin a special stack pop (used by iret, retf and such).8299 *8300 * This will raise \#SS or \#PF if appropriate.8301 *8302 * @returns Strict VBox status code.8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.8304 * @param cbMem The number of bytes to pop from the stack.8305 * @param cbAlign The alignment mask (7, 3, 1).8306 * @param ppvMem Where to return the pointer to the stack memory.8307 * @param pbUnmapInfo Where to store unmap info for8308 * iemMemStackPopDoneSpecial.8309 * @param puNewRsp Where to return the new RSP value. 
This must be
 *                      assigned to CPUMCTX::rsp manually some time
 *                      after iemMemStackPopDoneSpecial() has been
 *                      called.
 */
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);
    RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
}


/**
 * Continue a special stack pop (used by iret and retf), for the purpose of
 * retrieving a new stack pointer.
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   off         Offset from the top of the stack. This is zero
 *                      except in the retf case.
 * @param   cbMem       The number of bytes to pop from the stack.
 * @param   ppvMem      Where to return the pointer to the stack memory.
 * @param   pbUnmapInfo Where to store unmap info for
 *                      iemMemStackPopDoneSpecial.
 * @param   uCurNewRsp  The current uncommitted RSP value. (No need to
 *                      return this because all use of this function is
 *                      to retrieve a new value and anything we return
 *                      here would be discarded.)
 */
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);

    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
    RTGCPTR GCPtrTop;
    if (IEM_IS_64BIT_CODE(pVCpu))
        GCPtrTop = uCurNewRsp;
    else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        GCPtrTop = (uint32_t)uCurNewRsp;
    else
        GCPtrTop = (uint16_t)uCurNewRsp;

    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
                     0 /* checked in iemMemStackPopBeginSpecial */);
}


/**
 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
 * iemMemStackPopContinueSpecial).
 *
 * The caller will manually commit the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap information returned by
 *                      iemMemStackPopBeginSpecial() or
 *                      iemMemStackPopContinueSpecial().
 */
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}


/**
 * Fetches a system table byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbDst       Where to return the byte.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access. The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now...
 */
    uint8_t        bUnmapInfo;
    uint8_t const *pbSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pbDst = *pbSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu16Dst     Where to return the word.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access. The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu32Dst     Where to return the dword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access. The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the qword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access. The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now...
*/8467 uint8_t bUnmapInfo;8468 uint64_t const *pu64Src;8469 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);8470 if (rc == VINF_SUCCESS)8471 {8472 *pu64Dst = *pu64Src;8473 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8474 }8475 return rc;8476 }8477 8478 8479 /**8480 * Fetches a descriptor table entry with caller specified error code.8481 *8482 * @returns Strict VBox status code.8483 * @param pVCpu The cross context virtual CPU structure of the calling thread.8484 * @param pDesc Where to return the descriptor table entry.8485 * @param uSel The selector which table entry to fetch.8486 * @param uXcpt The exception to raise on table lookup error.8487 * @param uErrorCode The error code associated with the exception.8488 */8489 static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,8490 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT8491 {8492 AssertPtr(pDesc);8493 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);8494 8495 /** @todo did the 286 require all 8 bytes to be accessible? */8496 /*8497 * Get the selector table base and check bounds.8498 */8499 RTGCPTR GCPtrBase;8500 if (uSel & X86_SEL_LDT)8501 {8502 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present8503 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )8504 {8505 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",8506 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));8507 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8508 uErrorCode, 0);8509 }8510 8511 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);8512 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;8513 }8514 else8515 {8516 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)8517 {8518 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));8519 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,8520 uErrorCode, 0);8521 }8522 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;8523 }8524 8525 /*8526 * Read the legacy descriptor and maybe the long mode extensions if8527 * required.8528 */8529 VBOXSTRICTRC rcStrict;8530 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)8531 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));8532 else8533 {8534 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);8535 if (rcStrict == VINF_SUCCESS)8536 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);8537 if (rcStrict == VINF_SUCCESS)8538 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);8539 if (rcStrict == VINF_SUCCESS)8540 pDesc->Legacy.au16[3] = 0;8541 else8542 return rcStrict;8543 }8544 8545 if (rcStrict == VINF_SUCCESS)8546 {8547 if ( !IEM_IS_LONG_MODE(pVCpu)8548 || pDesc->Legacy.Gen.u1DescType)8549 pDesc->Long.au64[1] = 0;8550 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 88551 <= (uSel & X86_SEL_LDT ? 
pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))8552 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);8553 else8554 {8555 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));8556 /** @todo is this the right exception? */8557 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);8558 }8559 }8560 return rcStrict;8561 }8562 8563 8564 /**8565 * Fetches a descriptor table entry.8566 *8567 * @returns Strict VBox status code.8568 * @param pVCpu The cross context virtual CPU structure of the calling thread.8569 * @param pDesc Where to return the descriptor table entry.8570 * @param uSel The selector which table entry to fetch.8571 * @param uXcpt The exception to raise on table lookup error.8572 */8573 VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT8574 {8575 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);8576 }8577 8578 8579 /**8580 * Marks the selector descriptor as accessed (only non-system descriptors).8581 *8582 * This function ASSUMES that iemMemFetchSelDesc has be called previously and8583 * will therefore skip the limit checks.8584 *8585 * @returns Strict VBox status code.8586 * @param pVCpu The cross context virtual CPU structure of the calling thread.8587 * @param uSel The selector.8588 */8589 VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT8590 {8591 /*8592 * Get the selector table base and calculate the entry address.8593 */8594 RTGCPTR GCPtr = uSel & X86_SEL_LDT8595 ? pVCpu->cpum.GstCtx.ldtr.u64Base8596 : pVCpu->cpum.GstCtx.gdtr.pGdt;8597 GCPtr += uSel & X86_SEL_MASK;8598 8599 /*8600 * ASMAtomicBitSet will assert if the address is misaligned, so do some8601 * ugly stuff to avoid this. This will make sure it's an atomic access8602 * as well more or less remove any question about 8-bit or 32-bit accesss.8603 */8604 VBOXSTRICTRC rcStrict;8605 uint8_t bUnmapInfo;8606 uint32_t volatile *pu32;8607 if ((GCPtr & 3) == 0)8608 {8609 /* The normal case, map the 32-bit bits around the accessed bit (40). */8610 GCPtr += 2 + 2;8611 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8612 if (rcStrict != VINF_SUCCESS)8613 return rcStrict;8614 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceeded by u8BaseHigh1. */8615 }8616 else8617 {8618 /* The misaligned GDT/LDT case, map the whole thing. 
*/8619 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);8620 if (rcStrict != VINF_SUCCESS)8621 return rcStrict;8622 switch ((uintptr_t)pu32 & 3)8623 {8624 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;8625 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;8626 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;8627 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;8628 }8629 }8630 8631 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);8632 }8633 8634 8635 #undef LOG_GROUP8636 #define LOG_GROUP LOG_GROUP_IEM8637 8638 /** @} */8639 8640 /** @name Opcode Helpers.8641 * @{8642 */8643 8644 /**8645 * Calculates the effective address of a ModR/M memory operand.8646 *8647 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8648 *8649 * @return Strict VBox status code.8650 * @param pVCpu The cross context virtual CPU structure of the calling thread.8651 * @param bRm The ModRM byte.8652 * @param cbImmAndRspOffset - First byte: The size of any immediate8653 * following the effective address opcode bytes8654 * (only for RIP relative addressing).8655 * - Second byte: RSP displacement (for POP [ESP]).8656 * @param pGCPtrEff Where to return the effective address.8657 */8658 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT8659 {8660 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));8661 # define SET_SS_DEF() \8662 do \8663 { \8664 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8665 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8666 } while (0)8667 8668 if (!IEM_IS_64BIT_CODE(pVCpu))8669 {8670 /** @todo Check the effective address size crap! */8671 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8672 {8673 uint16_t u16EffAddr;8674 8675 /* Handle the disp16 form with no registers first. */8676 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8677 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8678 else8679 {8680 /* Get the displacment. */8681 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8682 {8683 case 0: u16EffAddr = 0; break;8684 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8685 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8686 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */8687 }8688 8689 /* Add the base and index registers to the disp. */8690 switch (bRm & X86_MODRM_RM_MASK)8691 {8692 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8693 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8694 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8695 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8696 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8697 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8698 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8699 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8700 }8701 }8702 8703 *pGCPtrEff = u16EffAddr;8704 }8705 else8706 {8707 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8708 uint32_t u32EffAddr;8709 8710 /* Handle the disp32 form with no registers first. */8711 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8712 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);8713 else8714 {8715 /* Get the register (or SIB) value. 
*/8716 switch ((bRm & X86_MODRM_RM_MASK))8717 {8718 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8719 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8720 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8721 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8722 case 4: /* SIB */8723 {8724 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8725 8726 /* Get the index and scale it. */8727 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)8728 {8729 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;8730 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;8731 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;8732 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;8733 case 4: u32EffAddr = 0; /*none */ break;8734 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;8735 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8736 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8737 IEM_NOT_REACHED_DEFAULT_CASE_RET();8738 }8739 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8740 8741 /* add base */8742 switch (bSib & X86_SIB_BASE_MASK)8743 {8744 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;8745 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;8746 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;8747 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;8748 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8749 case 5:8750 if ((bRm & X86_MODRM_MOD_MASK) != 0)8751 {8752 u32EffAddr += pVCpu->cpum.GstCtx.ebp;8753 SET_SS_DEF();8754 }8755 else8756 {8757 uint32_t u32Disp;8758 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8759 u32EffAddr += u32Disp;8760 }8761 break;8762 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;8763 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;8764 IEM_NOT_REACHED_DEFAULT_CASE_RET();8765 }8766 break;8767 }8768 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;8769 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;8770 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;8771 IEM_NOT_REACHED_DEFAULT_CASE_RET();8772 }8773 8774 /* Get and add the displacement. */8775 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8776 {8777 case 0:8778 break;8779 case 1:8780 {8781 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);8782 u32EffAddr += i8Disp;8783 break;8784 }8785 case 2:8786 {8787 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);8788 u32EffAddr += u32Disp;8789 break;8790 }8791 default:8792 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */8793 }8794 8795 }8796 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8797 *pGCPtrEff = u32EffAddr;8798 }8799 }8800 else8801 {8802 uint64_t u64EffAddr;8803 8804 /* Handle the rip+disp32 form with no registers first. */8805 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)8806 {8807 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);8808 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));8809 }8810 else8811 {8812 /* Get the register (or SIB) value. 
*/8813 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)8814 {8815 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8816 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8817 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8818 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8819 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;8820 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8821 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8822 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8823 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8824 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8825 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8826 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8827 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8828 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8829 /* SIB */8830 case 4:8831 case 12:8832 {8833 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);8834 8835 /* Get the index and scale it. */8836 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)8837 {8838 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;8839 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;8840 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;8841 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;8842 case 4: u64EffAddr = 0; /*none */ break;8843 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;8844 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;8845 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;8846 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;8847 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;8848 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;8849 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;8850 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;8851 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;8852 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;8853 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;8854 IEM_NOT_REACHED_DEFAULT_CASE_RET();8855 }8856 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;8857 8858 /* add base */8859 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)8860 {8861 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;8862 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;8863 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;8864 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;8865 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;8866 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;8867 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;8868 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;8869 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;8870 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;8871 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;8872 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;8873 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;8874 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;8875 /* complicated encodings */8876 case 5:8877 case 13:8878 if ((bRm & X86_MODRM_MOD_MASK) != 0)8879 {8880 if (!pVCpu->iem.s.uRexB)8881 {8882 u64EffAddr += pVCpu->cpum.GstCtx.rbp;8883 SET_SS_DEF();8884 }8885 else8886 u64EffAddr += pVCpu->cpum.GstCtx.r13;8887 }8888 else8889 {8890 uint32_t u32Disp;8891 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8892 u64EffAddr += (int32_t)u32Disp;8893 }8894 break;8895 IEM_NOT_REACHED_DEFAULT_CASE_RET();8896 }8897 break;8898 }8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();8900 }8901 8902 
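        /*
         * [Editor's illustrative note, not part of the original source.]
         * Worked example of the register/SIB decode above, assuming all REX
         * bits are clear: "mov rax, [rbx + rcx*4 + 0x10]" encodes ModRM=0x44
         * (mod=01, reg=000, rm=100 => SIB + disp8) and SIB=0x8B (scale=10 => *4,
         * index=001 => rcx, base=011 => rbx), so the switches above leave
         * u64EffAddr = (rcx << 2) + rbx and the displacement switch below adds
         * the sign-extended disp8 0x10. Remember the special cases: index=100
         * with REX.X clear means "no index", and base=101 with mod=00 means
         * "disp32 only, no base".
         */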
/* Get and add the displacement. */8903 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8904 {8905 case 0:8906 break;8907 case 1:8908 {8909 int8_t i8Disp;8910 IEM_OPCODE_GET_NEXT_S8(&i8Disp);8911 u64EffAddr += i8Disp;8912 break;8913 }8914 case 2:8915 {8916 uint32_t u32Disp;8917 IEM_OPCODE_GET_NEXT_U32(&u32Disp);8918 u64EffAddr += (int32_t)u32Disp;8919 break;8920 }8921 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */8922 }8923 8924 }8925 8926 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)8927 *pGCPtrEff = u64EffAddr;8928 else8929 {8930 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);8931 *pGCPtrEff = u64EffAddr & UINT32_MAX;8932 }8933 }8934 8935 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));8936 return VINF_SUCCESS;8937 }8938 8939 8940 #ifdef IEM_WITH_SETJMP8941 /**8942 * Calculates the effective address of a ModR/M memory operand.8943 *8944 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.8945 *8946 * May longjmp on internal error.8947 *8948 * @return The effective address.8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.8950 * @param bRm The ModRM byte.8951 * @param cbImmAndRspOffset - First byte: The size of any immediate8952 * following the effective address opcode bytes8953 * (only for RIP relative addressing).8954 * - Second byte: RSP displacement (for POP [ESP]).8955 */8956 RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP8957 {8958 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));8959 # define SET_SS_DEF() \8960 do \8961 { \8962 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \8963 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \8964 } while (0)8965 8966 if (!IEM_IS_64BIT_CODE(pVCpu))8967 {8968 /** @todo Check the effective address size crap! */8969 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)8970 {8971 uint16_t u16EffAddr;8972 8973 /* Handle the disp16 form with no registers first. */8974 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)8975 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);8976 else8977 {8978 /* Get the displacment. */8979 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)8980 {8981 case 0: u16EffAddr = 0; break;8982 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;8983 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;8984 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */8985 }8986 8987 /* Add the base and index registers to the disp. */8988 switch (bRm & X86_MODRM_RM_MASK)8989 {8990 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;8991 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;8992 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;8993 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;8994 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;8995 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;8996 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;8997 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;8998 }8999 }9000 9001 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));9002 return u16EffAddr;9003 }9004 9005 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9006 uint32_t u32EffAddr;9007 9008 /* Handle the disp32 form with no registers first. 
*/9009 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9010 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9011 else9012 {9013 /* Get the register (or SIB) value. */9014 switch ((bRm & X86_MODRM_RM_MASK))9015 {9016 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9017 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9018 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9019 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9020 case 4: /* SIB */9021 {9022 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9023 9024 /* Get the index and scale it. */9025 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9026 {9027 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9028 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9029 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9030 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9031 case 4: u32EffAddr = 0; /*none */ break;9032 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9033 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9034 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9035 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9036 }9037 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9038 9039 /* add base */9040 switch (bSib & X86_SIB_BASE_MASK)9041 {9042 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9043 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9044 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9045 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9046 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9047 case 5:9048 if ((bRm & X86_MODRM_MOD_MASK) != 0)9049 {9050 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9051 SET_SS_DEF();9052 }9053 else9054 {9055 uint32_t u32Disp;9056 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9057 u32EffAddr += u32Disp;9058 }9059 break;9060 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9061 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9062 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9063 }9064 break;9065 }9066 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9067 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9068 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9069 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9070 }9071 9072 /* Get and add the displacement. */9073 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9074 {9075 case 0:9076 break;9077 case 1:9078 {9079 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9080 u32EffAddr += i8Disp;9081 break;9082 }9083 case 2:9084 {9085 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9086 u32EffAddr += u32Disp;9087 break;9088 }9089 default:9090 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */9091 }9092 }9093 9094 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9095 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));9096 return u32EffAddr;9097 }9098 9099 uint64_t u64EffAddr;9100 9101 /* Handle the rip+disp32 form with no registers first. */9102 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9103 {9104 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9105 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9106 }9107 else9108 {9109 /* Get the register (or SIB) value. 
*/9110 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9111 {9112 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9113 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9114 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9115 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9116 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9117 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9118 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9119 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9120 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9121 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9122 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9123 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9124 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9125 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9126 /* SIB */9127 case 4:9128 case 12:9129 {9130 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9131 9132 /* Get the index and scale it. */9133 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9134 {9135 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9136 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9137 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9138 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9139 case 4: u64EffAddr = 0; /*none */ break;9140 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9141 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9142 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9143 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9144 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9145 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9146 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9147 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9148 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9149 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9150 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9151 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9152 }9153 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9154 9155 /* add base */9156 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9157 {9158 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9159 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9160 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9161 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9162 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9163 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9164 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9165 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9166 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9167 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9168 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9169 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9170 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9171 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9172 /* complicated encodings */9173 case 5:9174 case 13:9175 if ((bRm & X86_MODRM_MOD_MASK) != 0)9176 {9177 if (!pVCpu->iem.s.uRexB)9178 {9179 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9180 SET_SS_DEF();9181 }9182 else9183 u64EffAddr += pVCpu->cpum.GstCtx.r13;9184 }9185 else9186 {9187 uint32_t u32Disp;9188 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9189 u64EffAddr += (int32_t)u32Disp;9190 }9191 break;9192 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9193 }9194 break;9195 }9196 
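            /*
             * [Editor's illustrative note, not part of the original source.]
             * In 64-bit mode the mod=00, rm=101 encoding never reaches this
             * switch: it is consumed by the rip+disp32 branch further up,
             * regardless of REX.B. An access like "mov eax, [r13]" therefore
             * has to be encoded with mod=01 and a zero disp8 (ModRM=0x45 plus
             * REX.B), which selects case 13 above and adds the zero displacement
             * below. The analogous SIB rule (base=101/13 with mod=00 => disp32,
             * no base) is what the "complicated encodings" case above handles.
             */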
IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);9197 }9198 9199 /* Get and add the displacement. */9200 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9201 {9202 case 0:9203 break;9204 case 1:9205 {9206 int8_t i8Disp;9207 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9208 u64EffAddr += i8Disp;9209 break;9210 }9211 case 2:9212 {9213 uint32_t u32Disp;9214 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9215 u64EffAddr += (int32_t)u32Disp;9216 break;9217 }9218 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */9219 }9220 9221 }9222 9223 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9224 {9225 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));9226 return u64EffAddr;9227 }9228 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9229 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));9230 return u64EffAddr & UINT32_MAX;9231 }9232 #endif /* IEM_WITH_SETJMP */9233 9234 9235 /**9236 * Calculates the effective address of a ModR/M memory operand, extended version9237 * for use in the recompilers.9238 *9239 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.9240 *9241 * @return Strict VBox status code.9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.9243 * @param bRm The ModRM byte.9244 * @param cbImmAndRspOffset - First byte: The size of any immediate9245 * following the effective address opcode bytes9246 * (only for RIP relative addressing).9247 * - Second byte: RSP displacement (for POP [ESP]).9248 * @param pGCPtrEff Where to return the effective address.9249 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and9250 * SIB byte (bits 39:32).9251 */9252 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT9253 {9254 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));9255 # define SET_SS_DEF() \9256 do \9257 { \9258 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \9259 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \9260 } while (0)9261 9262 uint64_t uInfo;9263 if (!IEM_IS_64BIT_CODE(pVCpu))9264 {9265 /** @todo Check the effective address size crap! */9266 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)9267 {9268 uint16_t u16EffAddr;9269 9270 /* Handle the disp16 form with no registers first. */9271 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)9272 {9273 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);9274 uInfo = u16EffAddr;9275 }9276 else9277 {9278 /* Get the displacment. */9279 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9280 {9281 case 0: u16EffAddr = 0; break;9282 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;9283 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;9284 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */9285 }9286 uInfo = u16EffAddr;9287 9288 /* Add the base and index registers to the disp. 
*/9289 switch (bRm & X86_MODRM_RM_MASK)9290 {9291 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;9292 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;9293 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;9294 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;9295 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;9296 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;9297 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;9298 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;9299 }9300 }9301 9302 *pGCPtrEff = u16EffAddr;9303 }9304 else9305 {9306 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9307 uint32_t u32EffAddr;9308 9309 /* Handle the disp32 form with no registers first. */9310 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9311 {9312 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);9313 uInfo = u32EffAddr;9314 }9315 else9316 {9317 /* Get the register (or SIB) value. */9318 uInfo = 0;9319 switch ((bRm & X86_MODRM_RM_MASK))9320 {9321 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9322 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9323 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9324 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9325 case 4: /* SIB */9326 {9327 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9328 uInfo = (uint64_t)bSib << 32;9329 9330 /* Get the index and scale it. */9331 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)9332 {9333 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;9334 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;9335 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;9336 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;9337 case 4: u32EffAddr = 0; /*none */ break;9338 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;9339 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9340 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9341 IEM_NOT_REACHED_DEFAULT_CASE_RET();9342 }9343 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9344 9345 /* add base */9346 switch (bSib & X86_SIB_BASE_MASK)9347 {9348 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;9349 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;9350 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;9351 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;9352 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9353 case 5:9354 if ((bRm & X86_MODRM_MOD_MASK) != 0)9355 {9356 u32EffAddr += pVCpu->cpum.GstCtx.ebp;9357 SET_SS_DEF();9358 }9359 else9360 {9361 uint32_t u32Disp;9362 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9363 u32EffAddr += u32Disp;9364 uInfo |= u32Disp;9365 }9366 break;9367 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;9368 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;9369 IEM_NOT_REACHED_DEFAULT_CASE_RET();9370 }9371 break;9372 }9373 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;9374 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;9375 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;9376 IEM_NOT_REACHED_DEFAULT_CASE_RET();9377 }9378 9379 /* Get and add the displacement. 
*/9380 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9381 {9382 case 0:9383 break;9384 case 1:9385 {9386 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);9387 u32EffAddr += i8Disp;9388 uInfo |= (uint32_t)(int32_t)i8Disp;9389 break;9390 }9391 case 2:9392 {9393 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);9394 u32EffAddr += u32Disp;9395 uInfo |= (uint32_t)u32Disp;9396 break;9397 }9398 default:9399 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */9400 }9401 9402 }9403 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9404 *pGCPtrEff = u32EffAddr;9405 }9406 }9407 else9408 {9409 uint64_t u64EffAddr;9410 9411 /* Handle the rip+disp32 form with no registers first. */9412 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)9413 {9414 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);9415 uInfo = (uint32_t)u64EffAddr;9416 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));9417 }9418 else9419 {9420 /* Get the register (or SIB) value. */9421 uInfo = 0;9422 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)9423 {9424 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9425 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9426 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9427 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9428 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;9429 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9430 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9431 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9432 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9433 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9434 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9435 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9436 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9437 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9438 /* SIB */9439 case 4:9440 case 12:9441 {9442 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);9443 uInfo = (uint64_t)bSib << 32;9444 9445 /* Get the index and scale it. 
*/9446 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)9447 {9448 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;9449 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;9450 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;9451 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;9452 case 4: u64EffAddr = 0; /*none */ break;9453 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;9454 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;9455 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;9456 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;9457 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;9458 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;9459 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;9460 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;9461 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;9462 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;9463 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;9464 IEM_NOT_REACHED_DEFAULT_CASE_RET();9465 }9466 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;9467 9468 /* add base */9469 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)9470 {9471 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;9472 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;9473 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;9474 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;9475 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;9476 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;9477 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;9478 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;9479 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;9480 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;9481 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;9482 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;9483 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;9484 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;9485 /* complicated encodings */9486 case 5:9487 case 13:9488 if ((bRm & X86_MODRM_MOD_MASK) != 0)9489 {9490 if (!pVCpu->iem.s.uRexB)9491 {9492 u64EffAddr += pVCpu->cpum.GstCtx.rbp;9493 SET_SS_DEF();9494 }9495 else9496 u64EffAddr += pVCpu->cpum.GstCtx.r13;9497 }9498 else9499 {9500 uint32_t u32Disp;9501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9502 u64EffAddr += (int32_t)u32Disp;9503 uInfo |= u32Disp;9504 }9505 break;9506 IEM_NOT_REACHED_DEFAULT_CASE_RET();9507 }9508 break;9509 }9510 IEM_NOT_REACHED_DEFAULT_CASE_RET();9511 }9512 9513 /* Get and add the displacement. 
*/9514 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)9515 {9516 case 0:9517 break;9518 case 1:9519 {9520 int8_t i8Disp;9521 IEM_OPCODE_GET_NEXT_S8(&i8Disp);9522 u64EffAddr += i8Disp;9523 uInfo |= (uint32_t)(int32_t)i8Disp;9524 break;9525 }9526 case 2:9527 {9528 uint32_t u32Disp;9529 IEM_OPCODE_GET_NEXT_U32(&u32Disp);9530 u64EffAddr += (int32_t)u32Disp;9531 uInfo |= u32Disp;9532 break;9533 }9534 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */9535 }9536 9537 }9538 9539 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)9540 *pGCPtrEff = u64EffAddr;9541 else9542 {9543 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);9544 *pGCPtrEff = u64EffAddr & UINT32_MAX;9545 }9546 }9547 *puInfo = uInfo;9548 9549 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));9550 return VINF_SUCCESS;9551 }9552 9553 /** @} */9554 9555 9556 #ifdef LOG_ENABLED9557 /**9558 * Logs the current instruction.9559 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9560 * @param fSameCtx Set if we have the same context information as the VMM,9561 * clear if we may have already executed an instruction in9562 * our debug context. When clear, we assume IEMCPU holds9563 * valid CPU mode info.9564 *9565 * The @a fSameCtx parameter is now misleading and obsolete.9566 * @param pszFunction The IEM function doing the execution.9567 */9568 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT9569 {9570 # ifdef IN_RING39571 if (LogIs2Enabled())9572 {9573 char szInstr[256];9574 uint32_t cbInstr = 0;9575 if (fSameCtx)9576 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,9577 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,9578 szInstr, sizeof(szInstr), &cbInstr);9579 else9580 {9581 uint32_t fFlags = 0;9582 switch (IEM_GET_CPU_MODE(pVCpu))9583 {9584 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;9585 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;9586 case IEMMODE_16BIT:9587 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)9588 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;9589 else9590 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;9591 break;9592 }9593 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,9594 szInstr, sizeof(szInstr), &cbInstr);9595 }9596 9597 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;9598 Log2(("**** %s fExec=%x\n"9599 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"9600 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"9601 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"9602 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"9603 " %s\n"9604 , pszFunction, pVCpu->iem.s.fExec,9605 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,9606 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,9607 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,9608 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,9609 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,9610 szInstr));9611 9612 /* This stuff sucks atm. as it fills the log with MSRs. 
*/9613 //if (LogIs3Enabled())9614 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);9615 }9616 else9617 # endif9618 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,9619 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));9620 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);9621 }9622 #endif /* LOG_ENABLED */9623 9624 9625 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9626 /**9627 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,9628 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.9629 *9630 * @returns Modified rcStrict.9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.9632 * @param rcStrict The instruction execution status.9633 */9634 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT9635 {9636 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));9637 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))9638 {9639 /* VMX preemption timer takes priority over NMI-window exits. */9640 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))9641 {9642 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);9643 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));9644 }9645 /*9646 * Check remaining intercepts.9647 *9648 * NMI-window and Interrupt-window VM-exits.9649 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.9650 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.9651 *9652 * See Intel spec. 26.7.6 "NMI-Window Exiting".9653 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".9654 */9655 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)9656 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9657 && !TRPMHasTrap(pVCpu))9658 {9659 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));9660 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)9661 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))9662 {9663 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);9664 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));9665 }9666 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)9667 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))9668 {9669 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);9670 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));9671 }9672 }9673 }9674 /* TPR-below threshold/APIC write has the highest priority. */9675 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))9676 {9677 rcStrict = iemVmxApicWriteEmulation(pVCpu);9678 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9679 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));9680 }9681 /* MTF takes priority over VMX-preemption timer. 
*/9682 else9683 {9684 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);9685 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));9686 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));9687 }9688 return rcStrict;9689 }9690 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */9691 9692 9693 /**9694 * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,9695 * IEMExecOneBypass and friends.9696 *9697 * Similar code is found in IEMExecLots.9698 *9699 * @return Strict VBox status code.9700 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9701 * @param fExecuteInhibit If set, execute the instruction following CLI,9702 * POP SS and MOV SS,GR.9703 * @param pszFunction The calling function name.9704 */9705 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)9706 {9707 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9708 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9709 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9710 RT_NOREF_PV(pszFunction);9711 9712 #ifdef IEM_WITH_SETJMP9713 VBOXSTRICTRC rcStrict;9714 IEM_TRY_SETJMP(pVCpu, rcStrict)9715 {9716 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9717 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9718 }9719 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9720 {9721 pVCpu->iem.s.cLongJumps++;9722 }9723 IEM_CATCH_LONGJMP_END(pVCpu);9724 #else9725 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9726 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9727 #endif9728 if (rcStrict == VINF_SUCCESS)9729 pVCpu->iem.s.cInstructions++;9730 if (pVCpu->iem.s.cActiveMappings > 0)9731 {9732 Assert(rcStrict != VINF_SUCCESS);9733 iemMemRollback(pVCpu);9734 }9735 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9736 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9737 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9738 9739 //#ifdef DEBUG9740 // AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));9741 //#endif9742 9743 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9744 /*9745 * Perform any VMX nested-guest instruction boundary actions.9746 *9747 * If any of these causes a VM-exit, we must skip executing the next9748 * instruction (would run into stale page tables). A VM-exit makes sure9749 * there is no interrupt-inhibition, so that should ensure we don't go9750 * to try execute the next instruction. 
Clearing fExecuteInhibit is9751 * problematic because of the setjmp/longjmp clobbering above.9752 */9753 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9754 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)9755 || rcStrict != VINF_SUCCESS)9756 { /* likely */ }9757 else9758 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9759 #endif9760 9761 /* Execute the next instruction as well if a cli, pop ss or9762 mov ss, Gr has just completed successfully. */9763 if ( fExecuteInhibit9764 && rcStrict == VINF_SUCCESS9765 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))9766 {9767 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));9768 if (rcStrict == VINF_SUCCESS)9769 {9770 #ifdef LOG_ENABLED9771 iemLogCurInstr(pVCpu, false, pszFunction);9772 #endif9773 #ifdef IEM_WITH_SETJMP9774 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)9775 {9776 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);9777 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9778 }9779 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);9780 {9781 pVCpu->iem.s.cLongJumps++;9782 }9783 IEM_CATCH_LONGJMP_END(pVCpu);9784 #else9785 IEM_OPCODE_GET_FIRST_U8(&b);9786 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);9787 #endif9788 if (rcStrict == VINF_SUCCESS)9789 {9790 pVCpu->iem.s.cInstructions++;9791 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX9792 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER9793 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))9794 { /* likely */ }9795 else9796 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);9797 #endif9798 }9799 if (pVCpu->iem.s.cActiveMappings > 0)9800 {9801 Assert(rcStrict != VINF_SUCCESS);9802 iemMemRollback(pVCpu);9803 }9804 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));9805 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));9806 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));9807 }9808 else if (pVCpu->iem.s.cActiveMappings > 0)9809 iemMemRollback(pVCpu);9810 /** @todo drop this after we bake this change into RIP advancing. */9811 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... 
*/9812 }9813 9814 /*9815 * Return value fiddling, statistics and sanity assertions.9816 */9817 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);9818 9819 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));9820 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));9821 return rcStrict;9822 }9823 9824 9825 /**9826 * Execute one instruction.9827 *9828 * @return Strict VBox status code.9829 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9830 */9831 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)9832 {9833 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */9834 #ifdef LOG_ENABLED9835 iemLogCurInstr(pVCpu, true, "IEMExecOne");9836 #endif9837 9838 /*9839 * Do the decoding and emulation.9840 */9841 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9842 if (rcStrict == VINF_SUCCESS)9843 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");9844 else if (pVCpu->iem.s.cActiveMappings > 0)9845 iemMemRollback(pVCpu);9846 9847 if (rcStrict != VINF_SUCCESS)9848 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9850 return rcStrict;9851 }9852 9853 9854 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9855 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9856 {9857 VBOXSTRICTRC rcStrict;9858 if ( cbOpcodeBytes9859 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9860 {9861 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);9862 #ifdef IEM_WITH_CODE_TLB9863 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9864 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9865 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9866 pVCpu->iem.s.offCurInstrStart = 0;9867 pVCpu->iem.s.offInstrNextByte = 0;9868 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9869 #else9870 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9871 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9872 #endif9873 rcStrict = VINF_SUCCESS;9874 }9875 else9876 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);9877 if (rcStrict == VINF_SUCCESS)9878 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");9879 else if (pVCpu->iem.s.cActiveMappings > 0)9880 iemMemRollback(pVCpu);9881 9882 return rcStrict;9883 }9884 9885 9886 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)9887 {9888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9889 if (rcStrict == VINF_SUCCESS)9890 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");9891 else if (pVCpu->iem.s.cActiveMappings > 0)9892 iemMemRollback(pVCpu);9893 9894 return rcStrict;9895 }9896 9897 9898 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,9899 const void *pvOpcodeBytes, size_t cbOpcodeBytes)9900 {9901 VBOXSTRICTRC rcStrict;9902 if ( cbOpcodeBytes9903 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)9904 {9905 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);9906 #ifdef IEM_WITH_CODE_TLB9907 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;9908 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;9909 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);9910 
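/* The assignments above and below seed the decoder's instruction buffer straight
 * from the caller-supplied opcode bytes (capped at one page), so this bypass path
 * can start executing at OpcodeBytesPC without re-fetching the opcodes from guest
 * memory first. */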
pVCpu->iem.s.offCurInstrStart = 0;9911 pVCpu->iem.s.offInstrNextByte = 0;9912 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;9913 #else9914 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));9915 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);9916 #endif9917 rcStrict = VINF_SUCCESS;9918 }9919 else9920 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);9921 if (rcStrict == VINF_SUCCESS)9922 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");9923 else if (pVCpu->iem.s.cActiveMappings > 0)9924 iemMemRollback(pVCpu);9925 9926 return rcStrict;9927 }9928 9929 9930 /**9931 * For handling split cacheline lock operations when the host has split-lock9932 * detection enabled.9933 *9934 * This will cause the interpreter to disregard the lock prefix and implicit9935 * locking (xchg).9936 *9937 * @returns Strict VBox status code.9938 * @param pVCpu The cross context virtual CPU structure of the calling EMT.9939 */9940 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)9941 {9942 /*9943 * Do the decoding and emulation.9944 */9945 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);9946 if (rcStrict == VINF_SUCCESS)9947 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");9948 else if (pVCpu->iem.s.cActiveMappings > 0)9949 iemMemRollback(pVCpu);9950 9951 if (rcStrict != VINF_SUCCESS)9952 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",9953 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));9954 return rcStrict;9955 }9956 9957 9958 /**9959 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to9960 * inject a pending TRPM trap.9961 */9962 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)9963 {9964 Assert(TRPMHasTrap(pVCpu));9965 9966 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)9967 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))9968 {9969 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */9970 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)9971 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);9972 if (fIntrEnabled)9973 {9974 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))9975 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9976 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))9977 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));9978 else9979 {9980 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));9981 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));9982 }9983 }9984 #else9985 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;9986 #endif9987 if (fIntrEnabled)9988 {9989 uint8_t u8TrapNo;9990 TRPMEVENT enmType;9991 uint32_t uErrCode;9992 RTGCPTR uCr2;9993 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);9994 AssertRC(rc2);9995 Assert(enmType == TRPM_HARDWARE_INT);9996 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);9997 9998 TRPMResetTrap(pVCpu);9999 10000 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10001 /* Injecting an event may cause a VM-exit. 
*/10002 if ( rcStrict != VINF_SUCCESS10003 && rcStrict != VINF_IEM_RAISED_XCPT)10004 return iemExecStatusCodeFiddling(pVCpu, rcStrict);10005 #else10006 NOREF(rcStrict);10007 #endif10008 }10009 }10010 10011 return VINF_SUCCESS;10012 }10013 10014 10015 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)10016 {10017 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;10018 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));10019 Assert(cMaxInstructions > 0);10020 10021 /*10022 * See if there is an interrupt pending in TRPM, inject it if we can.10023 */10024 /** @todo What if we are injecting an exception and not an interrupt? Is that10025 * possible here? For now we assert it is indeed only an interrupt. */10026 if (!TRPMHasTrap(pVCpu))10027 { /* likely */ }10028 else10029 {10030 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);10031 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10032 { /*likely */ }10033 else10034 return rcStrict;10035 }10036 10037 /*10038 * Initial decoder init w/ prefetch, then setup setjmp.10039 */10040 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10041 if (rcStrict == VINF_SUCCESS)10042 {10043 #ifdef IEM_WITH_SETJMP10044 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */10045 IEM_TRY_SETJMP(pVCpu, rcStrict)10046 #endif10047 {10048 /*10049 * The run loop. We limit ourselves to 4096 instructions right now.10050 */10051 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;10052 PVMCC pVM = pVCpu->CTX_SUFF(pVM);10053 for (;;)10054 {10055 /*10056 * Log the state.10057 */10058 #ifdef LOG_ENABLED10059 iemLogCurInstr(pVCpu, true, "IEMExecLots");10060 #endif10061 10062 /*10063 * Do the decoding and emulation.10064 */10065 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10066 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10067 #ifdef VBOX_STRICT10068 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);10069 #endif10070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10071 {10072 Assert(pVCpu->iem.s.cActiveMappings == 0);10073 pVCpu->iem.s.cInstructions++;10074 10075 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10076 /* Perform any VMX nested-guest instruction boundary actions. */10077 uint64_t fCpu = pVCpu->fLocalForcedActions;10078 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10079 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10080 { /* likely */ }10081 else10082 {10083 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10084 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10085 fCpu = pVCpu->fLocalForcedActions;10086 else10087 {10088 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10089 break;10090 }10091 }10092 #endif10093 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10094 {10095 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10096 uint64_t fCpu = pVCpu->fLocalForcedActions;10097 #endif10098 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310099 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10100 | VMCPU_FF_TLB_FLUSH10101 | VMCPU_FF_UNHALT );10102 10103 if (RT_LIKELY( ( !fCpu10104 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10105 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )10106 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))10107 {10108 if (--cMaxInstructionsGccStupidity > 0)10109 {10110 /* Poll timers every now an then according to the caller's specs. 
*/10111 if ( (cMaxInstructionsGccStupidity & cPollRate) != 010112 || !TMTimerPollBool(pVM, pVCpu))10113 {10114 Assert(pVCpu->iem.s.cActiveMappings == 0);10115 iemReInitDecoder(pVCpu);10116 continue;10117 }10118 }10119 }10120 }10121 Assert(pVCpu->iem.s.cActiveMappings == 0);10122 }10123 else if (pVCpu->iem.s.cActiveMappings > 0)10124 iemMemRollback(pVCpu);10125 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10126 break;10127 }10128 }10129 #ifdef IEM_WITH_SETJMP10130 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10131 {10132 if (pVCpu->iem.s.cActiveMappings > 0)10133 iemMemRollback(pVCpu);10134 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10135 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10136 # endif10137 pVCpu->iem.s.cLongJumps++;10138 }10139 IEM_CATCH_LONGJMP_END(pVCpu);10140 #endif10141 10142 /*10143 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).10144 */10145 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10147 }10148 else10149 {10150 if (pVCpu->iem.s.cActiveMappings > 0)10151 iemMemRollback(pVCpu);10152 10153 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10154 /*10155 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10156 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10157 */10158 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10159 #endif10160 }10161 10162 /*10163 * Maybe re-enter raw-mode and log.10164 */10165 if (rcStrict != VINF_SUCCESS)10166 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",10167 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));10168 if (pcInstructions)10169 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;10170 return rcStrict;10171 }10172 10173 10174 /**10175 * Interface used by EMExecuteExec, does exit statistics and limits.10176 *10177 * @returns Strict VBox status code.10178 * @param pVCpu The cross context virtual CPU structure.10179 * @param fWillExit To be defined.10180 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.10181 * @param cMaxInstructions Maximum number of instructions to execute.10182 * @param cMaxInstructionsWithoutExits10183 * The max number of instructions without exits.10184 * @param pStats Where to return statistics.10185 */10186 VMM_INT_DECL(VBOXSTRICTRC)10187 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,10188 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)10189 {10190 NOREF(fWillExit); /** @todo define flexible exit crits */10191 10192 /*10193 * Initialize return stats.10194 */10195 pStats->cInstructions = 0;10196 pStats->cExits = 0;10197 pStats->cMaxExitDistance = 0;10198 pStats->cReserved = 0;10199 10200 /*10201 * Initial decoder init w/ prefetch, then setup setjmp.10202 */10203 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);10204 if (rcStrict == VINF_SUCCESS)10205 {10206 #ifdef IEM_WITH_SETJMP10207 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? 
*/10208 IEM_TRY_SETJMP(pVCpu, rcStrict)10209 #endif10210 {10211 #ifdef IN_RING010212 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);10213 #endif10214 uint32_t cInstructionSinceLastExit = 0;10215 10216 /*10217 * The run loop. We limit ourselves to 4096 instructions right now.10218 */10219 PVM pVM = pVCpu->CTX_SUFF(pVM);10220 for (;;)10221 {10222 /*10223 * Log the state.10224 */10225 #ifdef LOG_ENABLED10226 iemLogCurInstr(pVCpu, true, "IEMExecForExits");10227 #endif10228 10229 /*10230 * Do the decoding and emulation.10231 */10232 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;10233 10234 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);10235 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);10236 10237 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits10238 && cInstructionSinceLastExit > 0 /* don't count the first */ )10239 {10240 pStats->cExits += 1;10241 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)10242 pStats->cMaxExitDistance = cInstructionSinceLastExit;10243 cInstructionSinceLastExit = 0;10244 }10245 10246 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10247 {10248 Assert(pVCpu->iem.s.cActiveMappings == 0);10249 pVCpu->iem.s.cInstructions++;10250 pStats->cInstructions++;10251 cInstructionSinceLastExit++;10252 10253 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10254 /* Perform any VMX nested-guest instruction boundary actions. */10255 uint64_t fCpu = pVCpu->fLocalForcedActions;10256 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER10257 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))10258 { /* likely */ }10259 else10260 {10261 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);10262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))10263 fCpu = pVCpu->fLocalForcedActions;10264 else10265 {10266 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10267 break;10268 }10269 }10270 #endif10271 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))10272 {10273 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX10274 uint64_t fCpu = pVCpu->fLocalForcedActions;10275 #endif10276 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR310277 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL10278 | VMCPU_FF_TLB_FLUSH10279 | VMCPU_FF_UNHALT );10280 if (RT_LIKELY( ( ( !fCpu10281 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))10282 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))10283 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )10284 || pStats->cInstructions < cMinInstructions))10285 {10286 if (pStats->cInstructions < cMaxInstructions)10287 {10288 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)10289 {10290 #ifdef IN_RING010291 if ( !fCheckPreemptionPending10292 || !RTThreadPreemptIsPending(NIL_RTTHREAD))10293 #endif10294 {10295 Assert(pVCpu->iem.s.cActiveMappings == 0);10296 iemReInitDecoder(pVCpu);10297 continue;10298 }10299 #ifdef IN_RING010300 rcStrict = VINF_EM_RAW_INTERRUPT;10301 break;10302 #endif10303 }10304 }10305 }10306 Assert(!(fCpu & VMCPU_FF_IEM));10307 }10308 Assert(pVCpu->iem.s.cActiveMappings == 0);10309 }10310 else if (pVCpu->iem.s.cActiveMappings > 0)10311 iemMemRollback(pVCpu);10312 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10313 break;10314 }10315 }10316 #ifdef IEM_WITH_SETJMP10317 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);10318 {10319 if (pVCpu->iem.s.cActiveMappings > 0)10320 iemMemRollback(pVCpu);10321 pVCpu->iem.s.cLongJumps++;10322 }10323 IEM_CATCH_LONGJMP_END(pVCpu);10324 #endif10325 10326 /*10327 * Assert hidden register sanity (also done in 
iemInitDecoder and iemReInitDecoder).10328 */10329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));10330 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));10331 }10332 else10333 {10334 if (pVCpu->iem.s.cActiveMappings > 0)10335 iemMemRollback(pVCpu);10336 10337 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)10338 /*10339 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching10340 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.10341 */10342 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);10343 #endif10344 }10345 10346 /*10347 * Maybe re-enter raw-mode and log.10348 */10349 if (rcStrict != VINF_SUCCESS)10350 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",10351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,10352 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));10353 return rcStrict;10354 }10355 10356 10357 /**10358 * Injects a trap, fault, abort, software interrupt or external interrupt.10359 *10360 * The parameter list matches TRPMQueryTrapAll pretty closely.10361 *10362 * @returns Strict VBox status code.10363 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10364 * @param u8TrapNo The trap number.10365 * @param enmType What type is it (trap/fault/abort), software10366 * interrupt or hardware interrupt.10367 * @param uErrCode The error code if applicable.10368 * @param uCr2 The CR2 value if applicable.10369 * @param cbInstr The instruction length (only relevant for10370 * software interrupts).10371 */10372 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,10373 uint8_t cbInstr)10374 {10375 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */10376 #ifdef DBGFTRACE_ENABLED10377 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",10378 u8TrapNo, enmType, uErrCode, uCr2);10379 #endif10380 10381 uint32_t fFlags;10382 switch (enmType)10383 {10384 case TRPM_HARDWARE_INT:10385 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));10386 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;10387 uErrCode = uCr2 = 0;10388 break;10389 10390 case TRPM_SOFTWARE_INT:10391 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));10392 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;10393 uErrCode = uCr2 = 0;10394 break;10395 10396 case TRPM_TRAP:10397 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. 
*/10398 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));10399 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;10400 if (u8TrapNo == X86_XCPT_PF)10401 fFlags |= IEM_XCPT_FLAGS_CR2;10402 switch (u8TrapNo)10403 {10404 case X86_XCPT_DF:10405 case X86_XCPT_TS:10406 case X86_XCPT_NP:10407 case X86_XCPT_SS:10408 case X86_XCPT_PF:10409 case X86_XCPT_AC:10410 case X86_XCPT_GP:10411 fFlags |= IEM_XCPT_FLAGS_ERR;10412 break;10413 }10414 break;10415 10416 IEM_NOT_REACHED_DEFAULT_CASE_RET();10417 }10418 10419 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);10420 10421 if (pVCpu->iem.s.cActiveMappings > 0)10422 iemMemRollback(pVCpu);10423 10424 return rcStrict;10425 }10426 10427 10428 /**10429 * Injects the active TRPM event.10430 *10431 * @returns Strict VBox status code.10432 * @param pVCpu The cross context virtual CPU structure.10433 */10434 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)10435 {10436 #ifndef IEM_IMPLEMENTS_TASKSWITCH10437 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));10438 #else10439 uint8_t u8TrapNo;10440 TRPMEVENT enmType;10441 uint32_t uErrCode;10442 RTGCUINTPTR uCr2;10443 uint8_t cbInstr;10444 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);10445 if (RT_FAILURE(rc))10446 return rc;10447 10448 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle10449 * ICEBP \#DB injection as a special case. */10450 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);10451 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM10452 if (rcStrict == VINF_SVM_VMEXIT)10453 rcStrict = VINF_SUCCESS;10454 #endif10455 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX10456 if (rcStrict == VINF_VMX_VMEXIT)10457 rcStrict = VINF_SUCCESS;10458 #endif10459 /** @todo Are there any other codes that imply the event was successfully10460 * delivered to the guest? See @bugref{6607}. */10461 if ( rcStrict == VINF_SUCCESS10462 || rcStrict == VINF_IEM_RAISED_XCPT)10463 TRPMResetTrap(pVCpu);10464 10465 return rcStrict;10466 #endif10467 }10468 10469 10470 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)10471 {10472 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10473 return VERR_NOT_IMPLEMENTED;10474 }10475 10476 10477 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)10478 {10479 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);10480 return VERR_NOT_IMPLEMENTED;10481 }10482 10483 10484 /**10485 * Interface for HM and EM for executing string I/O OUT (write) instructions.10486 *10487 * This API ASSUMES that the caller has already verified that the guest code is10488 * allowed to access the I/O port. (The I/O port is in the DX register in the10489 * guest state.)10490 *10491 * @returns Strict VBox status code.10492 * @param pVCpu The cross context virtual CPU structure.10493 * @param cbValue The size of the I/O port access (1, 2, or 4).10494 * @param enmAddrMode The addressing mode.10495 * @param fRepPrefix Indicates whether a repeat prefix is used10496 * (doesn't matter which for this instruction).10497 * @param cbInstr The instruction length in bytes.10498 * @param iEffSeg The effective segment address.10499 * @param fIoChecked Whether the access to the I/O port has been10500 * checked or not. 
It's typically checked in the10501 * HM scenario.10502 */10503 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10504 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)10505 {10506 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);10507 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10508 10509 /*10510 * State init.10511 */10512 iemInitExec(pVCpu, 0 /*fExecOpts*/);10513 10514 /*10515 * Switch orgy for getting to the right handler.10516 */10517 VBOXSTRICTRC rcStrict;10518 if (fRepPrefix)10519 {10520 switch (enmAddrMode)10521 {10522 case IEMMODE_16BIT:10523 switch (cbValue)10524 {10525 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10526 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10527 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10528 default:10529 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10530 }10531 break;10532 10533 case IEMMODE_32BIT:10534 switch (cbValue)10535 {10536 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10537 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10538 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10539 default:10540 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10541 }10542 break;10543 10544 case IEMMODE_64BIT:10545 switch (cbValue)10546 {10547 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10548 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10549 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10550 default:10551 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10552 }10553 break;10554 10555 default:10556 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10557 }10558 }10559 else10560 {10561 switch (enmAddrMode)10562 {10563 case IEMMODE_16BIT:10564 switch (cbValue)10565 {10566 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10567 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10568 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10569 default:10570 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10571 }10572 break;10573 10574 case IEMMODE_32BIT:10575 switch (cbValue)10576 {10577 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10578 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10579 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10580 default:10581 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10582 }10583 break;10584 10585 case IEMMODE_64BIT:10586 switch (cbValue)10587 {10588 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10589 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10590 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;10591 default:10592 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10593 }10594 break;10595 10596 default:10597 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10598 }10599 }10600 10601 if (pVCpu->iem.s.cActiveMappings)10602 iemMemRollback(pVCpu);10603 10604 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10605 }10606 10607 10608 /**10609 * Interface for HM and EM for executing string I/O IN (read) instructions.10610 *10611 * This API ASSUMES that the caller has already verified that the guest code is10612 * allowed to access the I/O port. (The I/O port is in the DX register in the10613 * guest state.)10614 *10615 * @returns Strict VBox status code.10616 * @param pVCpu The cross context virtual CPU structure.10617 * @param cbValue The size of the I/O port access (1, 2, or 4).10618 * @param enmAddrMode The addressing mode.10619 * @param fRepPrefix Indicates whether a repeat prefix is used10620 * (doesn't matter which for this instruction).10621 * @param cbInstr The instruction length in bytes.10622 * @param fIoChecked Whether the access to the I/O port has been10623 * checked or not. It's typically checked in the10624 * HM scenario.10625 */10626 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,10627 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)10628 {10629 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10630 10631 /*10632 * State init.10633 */10634 iemInitExec(pVCpu, 0 /*fExecOpts*/);10635 10636 /*10637 * Switch orgy for getting to the right handler.10638 */10639 VBOXSTRICTRC rcStrict;10640 if (fRepPrefix)10641 {10642 switch (enmAddrMode)10643 {10644 case IEMMODE_16BIT:10645 switch (cbValue)10646 {10647 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10648 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10649 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10650 default:10651 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10652 }10653 break;10654 10655 case IEMMODE_32BIT:10656 switch (cbValue)10657 {10658 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10659 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10660 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10661 default:10662 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10663 }10664 break;10665 10666 case IEMMODE_64BIT:10667 switch (cbValue)10668 {10669 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10670 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10671 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10672 default:10673 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10674 }10675 break;10676 10677 default:10678 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10679 }10680 }10681 else10682 {10683 switch (enmAddrMode)10684 {10685 case IEMMODE_16BIT:10686 switch (cbValue)10687 {10688 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;10689 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;10690 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;10691 default:10692 
AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10693 }10694 break;10695 10696 case IEMMODE_32BIT:10697 switch (cbValue)10698 {10699 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;10700 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;10701 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;10702 default:10703 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10704 }10705 break;10706 10707 case IEMMODE_64BIT:10708 switch (cbValue)10709 {10710 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;10711 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;10712 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;10713 default:10714 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);10715 }10716 break;10717 10718 default:10719 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);10720 }10721 }10722 10723 if ( pVCpu->iem.s.cActiveMappings == 010724 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))10725 { /* likely */ }10726 else10727 {10728 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));10729 iemMemRollback(pVCpu);10730 }10731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10732 }10733 10734 10735 /**10736 * Interface for rawmode to write execute an OUT instruction.10737 *10738 * @returns Strict VBox status code.10739 * @param pVCpu The cross context virtual CPU structure.10740 * @param cbInstr The instruction length in bytes.10741 * @param u16Port The port to read.10742 * @param fImm Whether the port is specified using an immediate operand or10743 * using the implicit DX register.10744 * @param cbReg The register size.10745 *10746 * @remarks In ring-0 not all of the state needs to be synced in.10747 */10748 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10749 {10750 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10751 Assert(cbReg <= 4 && cbReg != 3);10752 10753 iemInitExec(pVCpu, 0 /*fExecOpts*/);10754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,10755 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10756 Assert(!pVCpu->iem.s.cActiveMappings);10757 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10758 }10759 10760 10761 /**10762 * Interface for rawmode to write execute an IN instruction.10763 *10764 * @returns Strict VBox status code.10765 * @param pVCpu The cross context virtual CPU structure.10766 * @param cbInstr The instruction length in bytes.10767 * @param u16Port The port to read.10768 * @param fImm Whether the port is specified using an immediate operand or10769 * using the implicit DX.10770 * @param cbReg The register size.10771 */10772 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)10773 {10774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);10775 Assert(cbReg <= 4 && cbReg != 3);10776 10777 iemInitExec(pVCpu, 0 /*fExecOpts*/);10778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,10779 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);10780 Assert(!pVCpu->iem.s.cActiveMappings);10781 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10782 }10783 10784 10785 /**10786 * Interface for HM and EM to 
write to a CRx register.10787 *10788 * @returns Strict VBox status code.10789 * @param pVCpu The cross context virtual CPU structure.10790 * @param cbInstr The instruction length in bytes.10791 * @param iCrReg The control register number (destination).10792 * @param iGReg The general purpose register number (source).10793 *10794 * @remarks In ring-0 not all of the state needs to be synced in.10795 */10796 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)10797 {10798 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10799 Assert(iCrReg < 16);10800 Assert(iGReg < 16);10801 10802 iemInitExec(pVCpu, 0 /*fExecOpts*/);10803 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);10804 Assert(!pVCpu->iem.s.cActiveMappings);10805 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10806 }10807 10808 10809 /**10810 * Interface for HM and EM to read from a CRx register.10811 *10812 * @returns Strict VBox status code.10813 * @param pVCpu The cross context virtual CPU structure.10814 * @param cbInstr The instruction length in bytes.10815 * @param iGReg The general purpose register number (destination).10816 * @param iCrReg The control register number (source).10817 *10818 * @remarks In ring-0 not all of the state needs to be synced in.10819 */10820 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)10821 {10822 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10823 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR410824 | CPUMCTX_EXTRN_APIC_TPR);10825 Assert(iCrReg < 16);10826 Assert(iGReg < 16);10827 10828 iemInitExec(pVCpu, 0 /*fExecOpts*/);10829 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);10830 Assert(!pVCpu->iem.s.cActiveMappings);10831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10832 }10833 10834 10835 /**10836 * Interface for HM and EM to write to a DRx register.10837 *10838 * @returns Strict VBox status code.10839 * @param pVCpu The cross context virtual CPU structure.10840 * @param cbInstr The instruction length in bytes.10841 * @param iDrReg The debug register number (destination).10842 * @param iGReg The general purpose register number (source).10843 *10844 * @remarks In ring-0 not all of the state needs to be synced in.10845 */10846 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)10847 {10848 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10849 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10850 Assert(iDrReg < 8);10851 Assert(iGReg < 16);10852 10853 iemInitExec(pVCpu, 0 /*fExecOpts*/);10854 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);10855 Assert(!pVCpu->iem.s.cActiveMappings);10856 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10857 }10858 10859 10860 /**10861 * Interface for HM and EM to read from a DRx register.10862 *10863 * @returns Strict VBox status code.10864 * @param pVCpu The cross context virtual CPU structure.10865 * @param cbInstr The instruction length in bytes.10866 * @param iGReg The general purpose register number (destination).10867 * @param iDrReg The debug register number (source).10868 *10869 * @remarks In ring-0 not all of the state needs to be synced in.10870 */10871 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t 
cbInstr, uint8_t iGReg, uint8_t iDrReg)10872 {10873 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10874 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);10875 Assert(iDrReg < 8);10876 Assert(iGReg < 16);10877 10878 iemInitExec(pVCpu, 0 /*fExecOpts*/);10879 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);10880 Assert(!pVCpu->iem.s.cActiveMappings);10881 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10882 }10883 10884 10885 /**10886 * Interface for HM and EM to clear the CR0[TS] bit.10887 *10888 * @returns Strict VBox status code.10889 * @param pVCpu The cross context virtual CPU structure.10890 * @param cbInstr The instruction length in bytes.10891 *10892 * @remarks In ring-0 not all of the state needs to be synced in.10893 */10894 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)10895 {10896 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10897 10898 iemInitExec(pVCpu, 0 /*fExecOpts*/);10899 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);10900 Assert(!pVCpu->iem.s.cActiveMappings);10901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10902 }10903 10904 10905 /**10906 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).10907 *10908 * @returns Strict VBox status code.10909 * @param pVCpu The cross context virtual CPU structure.10910 * @param cbInstr The instruction length in bytes.10911 * @param uValue The value to load into CR0.10912 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a10913 * memory operand. Otherwise pass NIL_RTGCPTR.10914 *10915 * @remarks In ring-0 not all of the state needs to be synced in.10916 */10917 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)10918 {10919 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10920 10921 iemInitExec(pVCpu, 0 /*fExecOpts*/);10922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);10923 Assert(!pVCpu->iem.s.cActiveMappings);10924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10925 }10926 10927 10928 /**10929 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).10930 *10931 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.10932 *10933 * @returns Strict VBox status code.10934 * @param pVCpu The cross context virtual CPU structure of the calling EMT.10935 * @param cbInstr The instruction length in bytes.10936 * @remarks In ring-0 not all of the state needs to be synced in.10937 * @thread EMT(pVCpu)10938 */10939 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)10940 {10941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);10942 10943 iemInitExec(pVCpu, 0 /*fExecOpts*/);10944 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);10945 Assert(!pVCpu->iem.s.cActiveMappings);10946 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10947 }10948 10949 10950 /**10951 * Interface for HM and EM to emulate the WBINVD instruction.10952 *10953 * @returns Strict VBox status code.10954 * @param pVCpu The cross context virtual CPU structure.10955 * @param cbInstr The instruction length in bytes.10956 *10957 * @remarks In ring-0 not all of the state needs to be synced in.10958 */10959 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)10960 {10961 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10962 10963 iemInitExec(pVCpu, 0 /*fExecOpts*/);10964 
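/* All of these IEMExecDecoded* helpers follow the same pattern: set up a minimal
 * execution state with iemInitExec, dispatch to the C implementation worker via
 * an IEM_CIMPL_CALL_* macro, assert that no memory mappings were left behind, and
 * translate the status for the caller with
 * iemUninitExecAndFiddleStatusAndMaybeReenter. */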
VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);10965 Assert(!pVCpu->iem.s.cActiveMappings);10966 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10967 }10968 10969 10970 /**10971 * Interface for HM and EM to emulate the INVD instruction.10972 *10973 * @returns Strict VBox status code.10974 * @param pVCpu The cross context virtual CPU structure.10975 * @param cbInstr The instruction length in bytes.10976 *10977 * @remarks In ring-0 not all of the state needs to be synced in.10978 */10979 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)10980 {10981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);10982 10983 iemInitExec(pVCpu, 0 /*fExecOpts*/);10984 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);10985 Assert(!pVCpu->iem.s.cActiveMappings);10986 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);10987 }10988 10989 10990 /**10991 * Interface for HM and EM to emulate the INVLPG instruction.10992 *10993 * @returns Strict VBox status code.10994 * @retval VINF_PGM_SYNC_CR310995 *10996 * @param pVCpu The cross context virtual CPU structure.10997 * @param cbInstr The instruction length in bytes.10998 * @param GCPtrPage The effective address of the page to invalidate.10999 *11000 * @remarks In ring-0 not all of the state needs to be synced in.11001 */11002 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)11003 {11004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);11005 11006 iemInitExec(pVCpu, 0 /*fExecOpts*/);11007 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);11008 Assert(!pVCpu->iem.s.cActiveMappings);11009 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11010 }11011 11012 11013 /**11014 * Interface for HM and EM to emulate the INVPCID instruction.11015 *11016 * @returns Strict VBox status code.11017 * @retval VINF_PGM_SYNC_CR311018 *11019 * @param pVCpu The cross context virtual CPU structure.11020 * @param cbInstr The instruction length in bytes.11021 * @param iEffSeg The effective segment register.11022 * @param GCPtrDesc The effective address of the INVPCID descriptor.11023 * @param uType The invalidation type.11024 *11025 * @remarks In ring-0 not all of the state needs to be synced in.11026 */11027 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,11028 uint64_t uType)11029 {11030 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);11031 11032 iemInitExec(pVCpu, 0 /*fExecOpts*/);11033 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);11034 Assert(!pVCpu->iem.s.cActiveMappings);11035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);11036 }11037 11038 11039 /**11040 * Interface for HM and EM to emulate the CPUID instruction.11041 *11042 * @returns Strict VBox status code.11043 *11044 * @param pVCpu The cross context virtual CPU structure.11045 * @param cbInstr The instruction length in bytes.11046 *11047 * @remarks Not all of the state needs to be synced in, the usual pluss RAX and RCX.11048 */11049 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)11050 {11051 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);11052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);11053 11054 iemInitExec(pVCpu, 0 /*fExecOpts*/);11055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);11056 Assert(!pVCpu->iem.s.cActiveMappings);11057 return 
iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11058 }
11059
11060
11061 /**
11062 * Interface for HM and EM to emulate the RDPMC instruction.
11063 *
11064 * @returns Strict VBox status code.
11065 *
11066 * @param pVCpu The cross context virtual CPU structure.
11067 * @param cbInstr The instruction length in bytes.
11068 *
11069 * @remarks Not all of the state needs to be synced in.
11070 */
11071 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11072 {
11073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11075
11076 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11078 Assert(!pVCpu->iem.s.cActiveMappings);
11079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11080 }
11081
11082
11083 /**
11084 * Interface for HM and EM to emulate the RDTSC instruction.
11085 *
11086 * @returns Strict VBox status code.
11087 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11088 *
11089 * @param pVCpu The cross context virtual CPU structure.
11090 * @param cbInstr The instruction length in bytes.
11091 *
11092 * @remarks Not all of the state needs to be synced in.
11093 */
11094 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11095 {
11096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11097 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11098
11099 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11100 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11101 Assert(!pVCpu->iem.s.cActiveMappings);
11102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11103 }
11104
11105
11106 /**
11107 * Interface for HM and EM to emulate the RDTSCP instruction.
11108 *
11109 * @returns Strict VBox status code.
11110 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11111 *
11112 * @param pVCpu The cross context virtual CPU structure.
11113 * @param cbInstr The instruction length in bytes.
11114 *
11115 * @remarks Not all of the state needs to be synced in. Recommended
11116 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.
11117 */
11118 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11119 {
11120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11122
11123 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11124 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11125 Assert(!pVCpu->iem.s.cActiveMappings);
11126 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11127 }
11128
11129
11130 /**
11131 * Interface for HM and EM to emulate the RDMSR instruction.
11132 *
11133 * @returns Strict VBox status code.
11134 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11135 *
11136 * @param pVCpu The cross context virtual CPU structure.
11137 * @param cbInstr The instruction length in bytes.
11138 *
11139 * @remarks Not all of the state needs to be synced in. Requires RCX and
11140 * (currently) all MSRs.
11141 */
11142 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11143 {
11144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11145 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11146
11147 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11149 Assert(!pVCpu->iem.s.cActiveMappings);
11150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11151 }
11152
11153
11154 /**
11155 * Interface for HM and EM to emulate the WRMSR instruction.
11156 *
11157 * @returns Strict VBox status code.
11158 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11159 *
11160 * @param pVCpu The cross context virtual CPU structure.
11161 * @param cbInstr The instruction length in bytes.
11162 *
11163 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11164 * and (currently) all MSRs.
11165 */
11166 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11167 {
11168 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11169 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11170 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11171
11172 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11174 Assert(!pVCpu->iem.s.cActiveMappings);
11175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11176 }
11177
11178
11179 /**
11180 * Interface for HM and EM to emulate the MONITOR instruction.
11181 *
11182 * @returns Strict VBox status code.
11183 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11184 *
11185 * @param pVCpu The cross context virtual CPU structure.
11186 * @param cbInstr The instruction length in bytes.
11187 *
11188 * @remarks Not all of the state needs to be synced in.
11189 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11190 * are used.
11191 */
11192 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11193 {
11194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11195 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11196
11197 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11199 Assert(!pVCpu->iem.s.cActiveMappings);
11200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11201 }
11202
11203
11204 /**
11205 * Interface for HM and EM to emulate the MWAIT instruction.
11206 *
11207 * @returns Strict VBox status code.
11208 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11209 *
11210 * @param pVCpu The cross context virtual CPU structure.
11211 * @param cbInstr The instruction length in bytes.
11212 *
11213 * @remarks Not all of the state needs to be synced in.
11214 */
11215 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11216 {
11217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11218 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11219
11220 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11221 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11222 Assert(!pVCpu->iem.s.cActiveMappings);
11223 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11224 }
11225
11226
11227 /**
11228 * Interface for HM and EM to emulate the HLT instruction.
11229 *
11230 * @returns Strict VBox status code.
11231 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11232 *
11233 * @param pVCpu The cross context virtual CPU structure.
11234 * @param cbInstr The instruction length in bytes.
11235 *
11236 * @remarks Not all of the state needs to be synced in.
11237 */
11238 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11239 {
11240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11241
11242 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11244 Assert(!pVCpu->iem.s.cActiveMappings);
11245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11246 }
11247
11248
11249 3041 /**
11250 3042 * Checks if IEM is in the process of delivering an event (interrupt or
… …
11282 3074 }
11283 3075
11284 #ifdef IN_RING3
11285
11286 /**
11287 * Handles the unlikely and probably fatal merge cases.
11288 *
11289 * @returns Merged status code.
11290 * @param rcStrict Current EM status code.
11291 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11292 * with @a rcStrict.
11293 * @param iMemMap The memory mapping index. For error reporting only.
11294 * @param pVCpu The cross context virtual CPU structure of the calling
11295 * thread, for error reporting only.
11296 */
11297 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11298 unsigned iMemMap, PVMCPUCC pVCpu)
11299 {
11300 if (RT_FAILURE_NP(rcStrict))
11301 return rcStrict;
11302
11303 if (RT_FAILURE_NP(rcStrictCommit))
11304 return rcStrictCommit;
11305
11306 if (rcStrict == rcStrictCommit)
11307 return rcStrictCommit;
11308
11309 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11310 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11311 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11314 return VERR_IOM_FF_STATUS_IPE;
11315 }
11316
11317
11318 /**
11319 * Helper for IOMR3ProcessForceFlag.
11320 *
11321 * @returns Merged status code.
11322 * @param rcStrict Current EM status code.
11323 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11324 * with @a rcStrict.
11325 * @param iMemMap The memory mapping index. For error reporting only.
11326 * @param pVCpu The cross context virtual CPU structure of the calling
11327 * thread, for error reporting only.
11328 */
11329 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11330 {
11331 /* Simple. */
11332 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11333 return rcStrictCommit;
11334
11335 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11336 return rcStrict;
11337
11338 /* EM scheduling status codes. */
11339 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11340 && rcStrict <= VINF_EM_LAST))
11341 {
11342 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11343 && rcStrictCommit <= VINF_EM_LAST))
11344 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11345 }
11346
11347 /* Unlikely */
11348 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11349 }
11350
11351
11352 /**
11353 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11354 *
11355 * @returns Merge between @a rcStrict and what the commit operation returned.
11356 * @param pVM The cross context VM structure.
11357 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11358 * @param rcStrict The status code returned by ring-0 or raw-mode.
11359 */
11360 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11361 {
11362 /*
11363 * Reset the pending commit.
11364 */
11365 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11366 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11367 ("%#x %#x %#x\n",
11368 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11369 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11370
11371 /*
11372 * Commit the pending bounce buffers (usually just one).
11373 */
11374 unsigned cBufs = 0;
11375 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11376 while (iMemMap-- > 0)
11377 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11378 {
11379 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11380 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11381 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11382
11383 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11384 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11385 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11386
11387 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11388 {
11389 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11391 pbBuf,
11392 cbFirst,
11393 PGMACCESSORIGIN_IEM);
11394 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11395 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11396 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11397 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11398 }
11399
11400 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11401 {
11402 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11404 pbBuf + cbFirst,
11405 cbSecond,
11406 PGMACCESSORIGIN_IEM);
11407 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11408 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11409 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11410 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11411 }
11412 cBufs++;
11413 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11414 }
11415
11416 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11417 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11418 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11419 pVCpu->iem.s.cActiveMappings = 0;
11420 return rcStrict;
11421 }
11422
11423 #endif /* IN_RING3 */
11424
3076 /** @} */
3077
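Editorial note: the block removed above holds the IEMExecDecoded* thunks that HM and EM call after decoding an instruction themselves; each one asserts the expected instruction length, asserts that the required CPUMCTX_EXTRN_* state has been imported, runs the matching iemCImpl_* worker, and returns through iemUninitExecAndFiddleStatusAndMaybeReenter. As a rough illustration of the calling contract documented in the @remarks lines (this is not code from the changeset; myImportGuestState is a hypothetical stand-in for whatever state-import helper the caller already has), an RDMSR exit handler might look like this:

    #include <VBox/vmm/vmcc.h>
    #include <VBox/vmm/cpum.h>
    #include <VBox/vmm/iem.h>
    #include <iprt/assert.h>

    /* Hypothetical helper provided by the caller: pulls the requested
     * CPUMCTX_EXTRN_* state into the guest context before IEM reads it. */
    int myImportGuestState(PVMCPUCC pVCpu, uint64_t fWhat);

    static VBOXSTRICTRC myHandleRdmsrExit(PVMCPUCC pVCpu, uint8_t cbInstr)
    {
        /* Make sure the state IEM will read (RCX and the MSRs) is up to date,
         * mirroring the IEM_CTX_ASSERT in IEMExecDecodedRdmsr above. */
        int rc = myImportGuestState(pVCpu, CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
        AssertRCReturn(rc, rc);

        /* Let IEM do the privilege checks, the MSR read and the RIP advance. */
        VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
        if (rcStrict == VINF_IEM_RAISED_XCPT)
            rcStrict = VINF_SUCCESS; /* IEM already updated the guest context for the exception. */
        return rcStrict;
    }

The same pattern presumably applies to the other IEMExecDecoded* interfaces; only the CPUMCTX_EXTRN_* mask and the expected instruction length change.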
-
trunk/src/VBox/VMM/VMMAll/target-x86/IEMInternal-x86.h
r108195 r108220
3104 3104 VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
3105 3105 VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
3106 VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
3107 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
3106 3108
3107 3109 VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
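Editorial note: the header hunk above makes iemMemFetchSelDescWithErr visible to every x86 target file that includes IEMInternal-x86.h, next to the existing iemMemFetchSelDesc. As a rough sketch of how the two declarations relate (an illustration only, not the changeset's implementation; the error-code choice, the selector with its RPL bits masked off, is an assumption based on the usual x86 convention for selector faults):

    /* Hypothetical wrapper inside the IEM x86 target code (needs IEMInternal-x86.h). */
    VBOXSTRICTRC iemMemFetchSelDescSketch(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
    {
        /* Forward to the WithErr variant, using the selector (sans RPL) as the error code. */
        return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
    }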
Note: See TracChangeset for help on using the changeset viewer.