VirtualBox

Changeset 105768 in vbox for trunk/src/VBox/VMM/include


Timestamp: Aug 21, 2024 2:01:05 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 164480
Message:
VMM/IEM: Eliminated an unnecessary CS.LIM check in IEM_MC_REL_JMP_XXX for FLAT 32-bit mode, together with an unnecessary canonical target RIP check for 64-bit mode jumps within the same page (todo 5). bugref:10720

Location: trunk/src/VBox/VMM/include
Files: 2 edited
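
The two eliminated checks are redundant for different reasons. In FLAT 32-bit mode CS has base 0 and a limit of 0xffffffff, so a 32-bit EIP can never exceed CS.LIM and the limit compare is dead code. In 64-bit mode the non-canonical address hole starts and ends on page-aligned addresses, so a relative jump whose target provably stays inside the current guest page cannot produce a non-canonical RIP when the current RIP is canonical, and the canonical check can be dropped. The following is a minimal sketch of the same-page test, mirroring the IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE macro added to IEMOpHlp.h further down; the 4 KiB GUEST_PAGE_SHIFT fallback is an assumption for x86 guests and not part of the changeset.

    #include <stdbool.h>
    #include <stdint.h>
    #ifndef GUEST_PAGE_SHIFT
    # define GUEST_PAGE_SHIFT 12                /* assumed: 4 KiB guest pages on x86 */
    #endif

    /* Same-page test for a 64-bit relative jump: compare the guest-page index of
       the target RIP with that of the current instruction.  If they match, the
       target is canonical whenever the current RIP is, so the canonical check in
       the jump finisher can be skipped. */
    static inline bool isRelJmpWithinSamePage(uint64_t uRipInstr, uint8_t cbInstr, int64_t offDisp)
    {
        uint64_t const uRipTarget = uRipInstr + cbInstr + (uint64_t)offDisp;
        return (uRipTarget >> GUEST_PAGE_SHIFT) == (uRipInstr >> GUEST_PAGE_SHIFT);
    }

With that guarantee the finisher only has to assert the invariant, as the new iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF below does, instead of branching on IEM_IS_CANONICAL.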

  • trunk/src/VBox/VMM/include/IEMInline.h

    r105465 → r105768
    23572357
    23582358/**
    2359  * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
    2360  * code (never 64-bit).
     2359 * Adds a 8-bit signed jump offset to RIP from 64-bit code when the caller is
     2360 * sure it stays within the same page.
    23612361 *
    23622362 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    23712371 *                              taking the wrong conditional branhc.
    23722372 */
    2373 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    2374                                                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
    2375 {
    2376     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2377     Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
    2378 
    2379     uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    2380     if (enmEffOpSize == IEMMODE_16BIT)
    2381         uNewEip &= UINT16_MAX;
    2382     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2383         pVCpu->cpum.GstCtx.rip = uNewEip;
    2384     else
    2385         return iemRaiseGeneralProtectionFault0(pVCpu);
     2373DECL_FORCE_INLINE(VBOXSTRICTRC)
     2374iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2375                                                    IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2376{
     2377    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2378    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
     2379
     2380    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2381    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2382    pVCpu->cpum.GstCtx.rip = uNewRip;
    23862383
    23872384#ifndef IEM_WITH_CODE_TLB
     
    23972394
    23982395/**
    2399  * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU.
    2400  *
    2401  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    2402  * segment limit.
    2403  *
    2404  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2405  * @param   cbInstr             Instruction size.
    2406  * @param   offNextInstr        The offset of the next instruction.
    2407  * @param   rcNormal            VINF_SUCCESS to continue TB.
    2408  *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    2409  *                              taking the wrong conditional branhc.
    2410  */
    2411 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2412                                                                             int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2413 {
    2414     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2415 
    2416     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    2417     if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2418         pVCpu->cpum.GstCtx.rip = uNewIp;
    2419     else
    2420         return iemRaiseGeneralProtectionFault0(pVCpu);
    2421 
    2422 #ifndef IEM_WITH_CODE_TLB
    2423     iemOpcodeFlushLight(pVCpu, cbInstr);
    2424 #endif
    2425 
    2426     /*
    2427      * Clear RF and finish the instruction (maybe raise #DB).
    2428      */
    2429     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2430 }
    2431 
    2432 
    2433 /**
    2434  * Adds a 8-bit signed jump offset to RIP from 64-bit code, no checking or
    2435  * clearing of flags.
     2396 * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
     2397 * code (never 64-bit).
    24362398 *
    24372399 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    24462408 *                              taking the wrong conditional branhc.
    24472409 */
    2448 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    2449                                                                           IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
    2450 {
    2451     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2452     Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
    2453 
    2454     uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2410DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2411                                                                             IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2412{
     2413    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2414    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2415
     2416    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    24552417    if (enmEffOpSize == IEMMODE_16BIT)
    2456         uNewRip &= UINT16_MAX;
    2457 
    2458     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    2459         pVCpu->cpum.GstCtx.rip = uNewRip;
     2418        uNewEip &= UINT16_MAX;
     2419    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2420        pVCpu->cpum.GstCtx.rip = uNewEip;
    24602421    else
    24612422        return iemRaiseGeneralProtectionFault0(pVCpu);
     
    24642425    iemOpcodeFlushLight(pVCpu, cbInstr);
    24652426#endif
    2466     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2467 }
    2468 
    2469 
    2470 /**
    2471  * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
    2472  * code (never 64-bit), no checking or clearing of flags.
     2427
     2428    /*
     2429     * Clear RF and finish the instruction (maybe raise #DB).
     2430     */
     2431    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2432}
     2433
     2434
     2435/**
     2436 * Adds a 8-bit signed jump offset to EIP, on 386 or later from FLAT 32-bit code
     2437 * (never 64-bit).
    24732438 *
    24742439 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    24832448 *                              taking the wrong conditional branhc.
    24842449 */
     2450DECL_FORCE_INLINE(VBOXSTRICTRC)
     2451 iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2452                                                  IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2453{
     2454    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2455    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2456
     2457    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
     2458    if (enmEffOpSize == IEMMODE_16BIT)
     2459        uNewEip &= UINT16_MAX;
     2460    pVCpu->cpum.GstCtx.rip = uNewEip;
     2461
     2462#ifndef IEM_WITH_CODE_TLB
     2463    iemOpcodeFlushLight(pVCpu, cbInstr);
     2464#endif
     2465
     2466    /*
     2467     * Clear RF and finish the instruction (maybe raise #DB).
     2468     */
     2469    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2470}
     2471
     2472
     2473/**
     2474 * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU.
     2475 *
     2476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2477 * segment limit.
     2478 *
     2479 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2480 * @param   cbInstr             Instruction size.
     2481 * @param   offNextInstr        The offset of the next instruction.
     2482 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2483 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2484 *                              taking the wrong conditional branhc.
     2485 */
     2486DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2487                                                                            int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2488{
     2489    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2490
     2491    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
     2492    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2493        pVCpu->cpum.GstCtx.rip = uNewIp;
     2494    else
     2495        return iemRaiseGeneralProtectionFault0(pVCpu);
     2496
     2497#ifndef IEM_WITH_CODE_TLB
     2498    iemOpcodeFlushLight(pVCpu, cbInstr);
     2499#endif
     2500
     2501    /*
     2502     * Clear RF and finish the instruction (maybe raise #DB).
     2503     */
     2504    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2505}
     2506
     2507
     2508/**
     2509 * Adds a 8-bit signed jump offset to RIP from 64-bit code, no checking or
     2510 * clearing of flags.
     2511 *
     2512 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2513 * segment limit.
     2514 *
     2515 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2516 * @param   cbInstr             Instruction size.
     2517 * @param   offNextInstr        The offset of the next instruction.
     2518 * @param   enmEffOpSize        Effective operand size.
     2519 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2520 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2521 *                              taking the wrong conditional branhc.
     2522 */
     2523DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2524                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2525{
     2526    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2527    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
     2528
     2529    uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2530    if (enmEffOpSize == IEMMODE_16BIT)
     2531        uNewRip &= UINT16_MAX;
     2532
     2533    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
     2534        pVCpu->cpum.GstCtx.rip = uNewRip;
     2535    else
     2536        return iemRaiseGeneralProtectionFault0(pVCpu);
     2537
     2538#ifndef IEM_WITH_CODE_TLB
     2539    iemOpcodeFlushLight(pVCpu, cbInstr);
     2540#endif
     2541    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2542}
     2543
     2544
     2545/**
     2546 * Adds a 8-bit signed jump offset to RIP from 64-bit code when caller is sure
     2547 * it stays within the same page, no checking or clearing of flags.
     2548 *
     2549 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2550 * segment limit.
     2551 *
     2552 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2553 * @param   cbInstr             Instruction size.
     2554 * @param   offNextInstr        The offset of the next instruction.
     2555 * @param   enmEffOpSize        Effective operand size.
     2556 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2557 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2558 *                              taking the wrong conditional branhc.
     2559 */
     2560DECL_FORCE_INLINE(VBOXSTRICTRC)
     2561iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2562                                                 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2563{
     2564    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2565    Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
     2566
     2567    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2568    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2569    pVCpu->cpum.GstCtx.rip = uNewRip;
     2570
     2571#ifndef IEM_WITH_CODE_TLB
     2572    iemOpcodeFlushLight(pVCpu, cbInstr);
     2573#endif
     2574    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2575}
     2576
     2577
     2578/**
     2579 * Adds a 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
     2580 * code (never 64-bit), no checking or clearing of flags.
     2581 *
     2582 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2583 * segment limit.
     2584 *
     2585 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2586 * @param   cbInstr             Instruction size.
     2587 * @param   offNextInstr        The offset of the next instruction.
     2588 * @param   enmEffOpSize        Effective operand size.
     2589 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2590 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2591 *                              taking the wrong conditional branhc.
     2592 */
    24852593DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    24862594                                                                          IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     
    25052613
    25062614/**
     2615 * Adds a 8-bit signed jump offset to EIP, on 386 or later from flat 32-bit code
     2616 * (never 64-bit), no checking or clearing of flags.
     2617 *
     2618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2619 * segment limit.
     2620 *
     2621 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2622 * @param   cbInstr             Instruction size.
     2623 * @param   offNextInstr        The offset of the next instruction.
     2624 * @param   enmEffOpSize        Effective operand size.
     2625 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2626 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2627 *                              taking the wrong conditional branhc.
     2628 */
     2629DECL_FORCE_INLINE(VBOXSTRICTRC)
     2630iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
     2631                                              IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
     2632{
     2633    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2634    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
     2635
     2636    uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
     2637    if (enmEffOpSize == IEMMODE_16BIT)
     2638        uNewEip &= UINT16_MAX;
     2639    pVCpu->cpum.GstCtx.rip = uNewEip;
     2640
     2641#ifndef IEM_WITH_CODE_TLB
     2642    iemOpcodeFlushLight(pVCpu, cbInstr);
     2643#endif
     2644    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2645}
     2646
     2647
     2648/**
    25072649 * Adds a 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
    25082650 * clearing of flags.
     
    26052747
    26062748/**
    2607  * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
    2608  * clearing of flags.
     2749 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code.
     2750 *
     2751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2752 * segment limit.
    26092753 *
    26102754 * @returns Strict VBox status code.
     
    26152759 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26162760 *                              taking the wrong conditional branhc.
    2617  */
    2618 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2619                                                                            int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2620 {
    2621     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2622 
    2623     pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
     2761 *
     2762 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2763 *          identical.
     2764 */
     2765DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2766                                                                                  int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2767{
     2768    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2769
     2770    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2771    pVCpu->cpum.GstCtx.rip = uNewIp;
    26242772
    26252773#ifndef IEM_WITH_CODE_TLB
    26262774    iemOpcodeFlushLight(pVCpu, cbInstr);
    26272775#endif
    2628     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2629 }
    2630 
    2631 
    2632 /**
    2633  * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
    2634  * no checking or clearing of flags.
    2635  *
    2636  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    2637  * segment limit.
     2776
     2777    /*
     2778     * Clear RF and finish the instruction (maybe raise #DB).
     2779     */
     2780    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2781}
     2782
     2783
     2784/**
     2785 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
     2786 * clearing of flags.
    26382787 *
    26392788 * @returns Strict VBox status code.
     
    26442793 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26452794 *                              taking the wrong conditional branhc.
    2646  *
    2647  * @note    This is also used by 16-bit code in pre-386 mode, as the code is
    2648  *          identical.
    2649  */
    2650 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2795 */
     2796DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    26512797                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    26522798{
    2653     Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2654 
    2655     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    2656     if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2657         pVCpu->cpum.GstCtx.rip = uNewIp;
    2658     else
    2659         return iemRaiseGeneralProtectionFault0(pVCpu);
     2799    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2800
     2801    pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
    26602802
    26612803#ifndef IEM_WITH_CODE_TLB
     
    26672809
    26682810/**
    2669  * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2811 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
     2812 * no checking or clearing of flags.
    26702813 *
    26712814 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    26722815 * segment limit.
    2673  *
    2674  * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
    2675  * only alternative for relative jumps in 64-bit code and that is already
    2676  * handled in the decoder stage.
    26772816 *
    26782817 * @returns Strict VBox status code.
     
    26832822 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    26842823 *                              taking the wrong conditional branhc.
    2685  */
    2686 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2687                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    2688 {
    2689     Assert(IEM_IS_64BIT_CODE(pVCpu));
    2690 
    2691     uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    2692     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    2693         pVCpu->cpum.GstCtx.rip = uNewRip;
     2824 *
     2825 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2826 *          identical.
     2827 */
     2828DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2829                                                                           int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2830{
     2831    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     2832
     2833    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2834    if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
     2835        pVCpu->cpum.GstCtx.rip = uNewIp;
    26942836    else
    26952837        return iemRaiseGeneralProtectionFault0(pVCpu);
     
    26982840    iemOpcodeFlushLight(pVCpu, cbInstr);
    26992841#endif
    2700 
    2701     /*
    2702      * Clear RF and finish the instruction (maybe raise #DB).
    2703      */
    2704     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2705 }
    2706 
    2707 
    2708 /**
    2709  * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2842    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2843}
     2844
     2845
     2846/**
     2847 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
     2848 * clearing of flags.
    27102849 *
    27112850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    27122851 * segment limit.
    2713  *
    2714  * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
    2715  * only alternative for relative jumps in 32-bit code and that is already
    2716  * handled in the decoder stage.
    27172852 *
    27182853 * @returns Strict VBox status code.
     
    27232858 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
    27242859 *                              taking the wrong conditional branhc.
    2725  */
    2726 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
    2727                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2860 *
     2861 * @note    This is also used by 16-bit code in pre-386 mode, as the code is
     2862 *          identical.
     2863 */
     2864DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     2865                                                                               int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
    27282866{
    27292867    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    2730     Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    2731 
    2732     uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
    2733     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2734         pVCpu->cpum.GstCtx.rip = uNewEip;
    2735     else
    2736         return iemRaiseGeneralProtectionFault0(pVCpu);
     2868
     2869    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
     2870    pVCpu->cpum.GstCtx.rip = uNewIp;
    27372871
    27382872#ifndef IEM_WITH_CODE_TLB
    27392873    iemOpcodeFlushLight(pVCpu, cbInstr);
    27402874#endif
    2741 
    2742     /*
    2743      * Clear RF and finish the instruction (maybe raise #DB).
    2744      */
    2745     return iemRegFinishClearingRF(pVCpu, rcNormal);
    2746 }
    2747 
    2748 
    2749 /**
    2750  * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
    2751  * clearing of flags.
     2875    return iemRegFinishNoFlags(pVCpu, rcNormal);
     2876}
     2877
     2878
     2879/**
     2880 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
    27522881 *
    27532882 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     
    27662895 *                              taking the wrong conditional branhc.
    27672896 */
    2768 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2769                                                                            int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2897DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2898                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    27702899{
    27712900    Assert(IEM_IS_64BIT_CODE(pVCpu));
     
    27802909    iemOpcodeFlushLight(pVCpu, cbInstr);
    27812910#endif
    2782     return iemRegFinishNoFlags(pVCpu, rcNormal);
    2783 }
    2784 
    2785 
    2786 /**
    2787  * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
    2788  * clearing of flags.
     2911
     2912    /*
     2913     * Clear RF and finish the instruction (maybe raise #DB).
     2914     */
     2915    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2916}
     2917
     2918
     2919/**
     2920 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
     2921 * sure the target is in the same page.
    27892922 *
    27902923 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    27912924 * segment limit.
    27922925 *
    2793  * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
    2794  * only alternative for relative jumps in 32-bit code and that is already
     2926 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     2927 * only alternative for relative jumps in 64-bit code and that is already
    27952928 * handled in the decoder stage.
    27962929 *
     
    28032936 *                              taking the wrong conditional branhc.
    28042937 */
    2805 DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
    2806                                                                            int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2938DECL_FORCE_INLINE(VBOXSTRICTRC)
     2939iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2940                                                     int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     2941{
     2942    Assert(IEM_IS_64BIT_CODE(pVCpu));
     2943
     2944    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     2945    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     2946    pVCpu->cpum.GstCtx.rip = uNewRip;
     2947
     2948#ifndef IEM_WITH_CODE_TLB
     2949    iemOpcodeFlushLight(pVCpu, cbInstr);
     2950#endif
     2951
     2952    /*
     2953     * Clear RF and finish the instruction (maybe raise #DB).
     2954     */
     2955    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2956}
     2957
     2958
     2959/**
     2960 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
     2961 *
     2962 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     2963 * segment limit.
     2964 *
     2965 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     2966 * only alternative for relative jumps in 32-bit code and that is already
     2967 * handled in the decoder stage.
     2968 *
     2969 * @returns Strict VBox status code.
     2970 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     2971 * @param   cbInstr             Instruction size.
     2972 * @param   offNextInstr        The offset of the next instruction.
     2973 * @param   rcNormal            VINF_SUCCESS to continue TB.
     2974 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     2975 *                              taking the wrong conditional branhc.
     2976 */
     2977DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     2978                                                                              int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
    28072979{
    28082980    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     
    28142986    else
    28152987        return iemRaiseGeneralProtectionFault0(pVCpu);
     2988
     2989#ifndef IEM_WITH_CODE_TLB
     2990    iemOpcodeFlushLight(pVCpu, cbInstr);
     2991#endif
     2992
     2993    /*
     2994     * Clear RF and finish the instruction (maybe raise #DB).
     2995     */
     2996    return iemRegFinishClearingRF(pVCpu, rcNormal);
     2997}
     2998
     2999
     3000/**
     3001 * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code.
     3002 *
     3003 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3004 * segment limit.
     3005 *
     3006 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3007 * only alternative for relative jumps in 32-bit code and that is already
     3008 * handled in the decoder stage.
     3009 *
     3010 * @returns Strict VBox status code.
     3011 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3012 * @param   cbInstr             Instruction size.
     3013 * @param   offNextInstr        The offset of the next instruction.
     3014 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3015 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3016 *                              taking the wrong conditional branhc.
     3017 */
     3018DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
     3019                                                                                  int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3020{
     3021    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3022    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3023
     3024    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3025    pVCpu->cpum.GstCtx.rip = uNewEip;
     3026
     3027#ifndef IEM_WITH_CODE_TLB
     3028    iemOpcodeFlushLight(pVCpu, cbInstr);
     3029#endif
     3030
     3031    /*
     3032     * Clear RF and finish the instruction (maybe raise #DB).
     3033     */
     3034    return iemRegFinishClearingRF(pVCpu, rcNormal);
     3035}
     3036
     3037
     3038
     3039/**
     3040 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
     3041 * clearing of flags.
     3042 *
     3043 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3044 * segment limit.
     3045 *
     3046 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     3047 * only alternative for relative jumps in 64-bit code and that is already
     3048 * handled in the decoder stage.
     3049 *
     3050 * @returns Strict VBox status code.
     3051 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3052 * @param   cbInstr             Instruction size.
     3053 * @param   offNextInstr        The offset of the next instruction.
     3054 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3055 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3056 *                              taking the wrong conditional branhc.
     3057 */
     3058DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3059                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3060{
     3061    Assert(IEM_IS_64BIT_CODE(pVCpu));
     3062
     3063    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     3064    if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
     3065        pVCpu->cpum.GstCtx.rip = uNewRip;
     3066    else
     3067        return iemRaiseGeneralProtectionFault0(pVCpu);
     3068
     3069#ifndef IEM_WITH_CODE_TLB
     3070    iemOpcodeFlushLight(pVCpu, cbInstr);
     3071#endif
     3072    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3073}
     3074
     3075
     3076/**
     3077 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
     3078 * sure it stays within the same page, no checking or clearing of flags.
     3079 *
     3080 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3081 * segment limit.
     3082 *
     3083 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
     3084 * only alternative for relative jumps in 64-bit code and that is already
     3085 * handled in the decoder stage.
     3086 *
     3087 * @returns Strict VBox status code.
     3088 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3089 * @param   cbInstr             Instruction size.
     3090 * @param   offNextInstr        The offset of the next instruction.
     3091 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3092 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3093 *                              taking the wrong conditional branhc.
     3094 */
     3095DECL_FORCE_INLINE(VBOXSTRICTRC)
     3096iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3097{
     3098    Assert(IEM_IS_64BIT_CODE(pVCpu));
     3099
     3100    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
     3101    Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
     3102    pVCpu->cpum.GstCtx.rip = uNewRip;
     3103
     3104#ifndef IEM_WITH_CODE_TLB
     3105    iemOpcodeFlushLight(pVCpu, cbInstr);
     3106#endif
     3107    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3108}
     3109
     3110
     3111/**
     3112 * Adds a 32-bit signed jump offset to RIP from 32-bit code, no checking or
     3113 * clearing of flags.
     3114 *
     3115 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3116 * segment limit.
     3117 *
     3118 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3119 * only alternative for relative jumps in 32-bit code and that is already
     3120 * handled in the decoder stage.
     3121 *
     3122 * @returns Strict VBox status code.
     3123 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3124 * @param   cbInstr             Instruction size.
     3125 * @param   offNextInstr        The offset of the next instruction.
     3126 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3127 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3128 *                              taking the wrong conditional branhc.
     3129 */
     3130DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3131                                                                           int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3132{
     3133    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3134    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3135
     3136    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3137    if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
     3138        pVCpu->cpum.GstCtx.rip = uNewEip;
     3139    else
     3140        return iemRaiseGeneralProtectionFault0(pVCpu);
     3141
     3142#ifndef IEM_WITH_CODE_TLB
     3143    iemOpcodeFlushLight(pVCpu, cbInstr);
     3144#endif
     3145    return iemRegFinishNoFlags(pVCpu, rcNormal);
     3146}
     3147
     3148
     3149/**
     3150 * Adds a 32-bit signed jump offset to RIP from FLAT 32-bit code, no checking or
     3151 * clearing of flags.
     3152 *
     3153 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
     3154 * segment limit.
     3155 *
     3156 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
     3157 * only alternative for relative jumps in 32-bit code and that is already
     3158 * handled in the decoder stage.
     3159 *
     3160 * @returns Strict VBox status code.
     3161 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     3162 * @param   cbInstr             Instruction size.
     3163 * @param   offNextInstr        The offset of the next instruction.
     3164 * @param   rcNormal            VINF_SUCCESS to continue TB.
     3165 *                              VINF_IEM_REEXEC_BREAK to force TB exit when
     3166 *                              taking the wrong conditional branhc.
     3167 */
     3168DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
     3169                                                                               int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
     3170{
     3171    Assert(!IEM_IS_64BIT_CODE(pVCpu));
     3172    Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
     3173
     3174    uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
     3175    pVCpu->cpum.GstCtx.rip = uNewEip;
    28163176
    28173177#ifndef IEM_WITH_CODE_TLB
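
On the 32-bit side, the new Flat variants shown above (for example iemRegEip32RelativeJumpS8FlatAndFinishClearingRF) simply drop the uNewEip <= cs.u32Limit compare, because a flat code segment spans the full 4 GiB address space. Below is an illustrative sketch of the two paths, using a made-up MINICTX structure rather than the real CPUMCTX state, with the 16-bit operand-size masking omitted for brevity.

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint32_t eip; uint32_t csLimit; } MINICTX;   /* illustrative stand-in, not CPUMCTX */

    /* Generic 16/32-bit path: the target must be checked against CS.LIM. */
    static bool eip32RelJmpGeneric(MINICTX *pCtx, uint8_t cbInstr, int8_t off)
    {
        uint32_t const uNewEip = pCtx->eip + cbInstr + (int32_t)off;
        if (uNewEip > pCtx->csLimit)
            return false;                       /* IEM raises #GP(0) here */
        pCtx->eip = uNewEip;
        return true;
    }

    /* FLAT path: CS.BASE == 0 and CS.LIM == 0xffffffff, so any 32-bit EIP is
       within the limit and the compare above is provably always true. */
    static bool eip32RelJmpFlat(MINICTX *pCtx, uint8_t cbInstr, int8_t off)
    {
        pCtx->eip = pCtx->eip + cbInstr + (int32_t)off;
        return true;
    }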
  • trunk/src/VBox/VMM/include/IEMOpHlp.h

    r105295 → r105768
    751751    } while (0)
    752752
     753/**
     754 * Used the threaded code generator to check if a jump stays within the same
     755 * page in 64-bit code.
     756 */
     757#define IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(a_offDisp) \
     758     (   ((pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (a_offDisp)) >> GUEST_PAGE_SHIFT) \
     759      == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT))
     760
    753761VBOXSTRICTRC    iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
    754762VBOXSTRICTRC    iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT;
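
A hedged sketch of how the same-page macro can steer the choice of finisher for a short jump in 64-bit code follows. The variables (pVCpu, cbInstr, i8Imm) and the non-IntraPg sibling iemRegRip64RelativeJumpS8AndFinishClearingRF are assumed from the surrounding IEM decoder code and are not part of this diff; the actual selection is done by the threaded code generator.

    /* Illustrative only: pick the cheaper finisher when the decoder can prove
       the jump target stays inside the current guest page. */
    VBOXSTRICTRC rcStrict;
    if (IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(i8Imm))
        rcStrict = iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, cbInstr, i8Imm,
                                                                       IEMMODE_64BIT, VINF_SUCCESS);
    else /* may cross a page or the canonical boundary: keep the full check */
        rcStrict = iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, cbInstr, i8Imm,
                                                                IEMMODE_64BIT, VINF_SUCCESS);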