Timestamp: Sep 29, 2024 1:14:19 AM (2 months ago)
Location:  trunk/src/VBox/VMM
Files:     11 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, unprefixed lines are unmodified context; '@@ -old +new @@' gives the starting line numbers of each hunk.
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
--- IEMAllAImpl.asm (r105486)
+++ IEMAllAImpl.asm (r106179)
@@ -443 +443 @@
         or      T0_32, %1               ; combine the flags. ASSUMES T0 = eax!
         ;mov    %1, T0_32               ; save the flags.
+%else
+        mov     T0_32, %1
 %endif
 %endmacro
@@ -513 +515 @@
 ; Calculates the new EFLAGS using fixed clear and set bit masks.
 ;
-; @remarks Clobbers T0.
+; @remarks Clobbers/returns T0.
 ; @param        1       The parameter (A0..A3) holding the eflags value.
 ; @param        2       Mask of additional flags to always clear
 ; @param        3       Mask of additional flags to always set.
 ;
-%macro IEM_ADJUST_FLAGS 3
+%macro IEM_ADJUST_FLAGS_RETVAL 3
+        mov     T0_32, %1               ; Load flags. ASSUMES T0 is EAX!
 %if (%2 | %3) != 0
-        mov     T0_32, %1               ; Load flags.
 %if (%2) != 0
         and     T0_32, ~(%2)            ; Remove the always cleared flags.
@@ -527 +529 @@
         or      T0_32, %3               ; Add the always set flags.
 %endif
-        mov     %1, T0_32               ; Save the result.
 %endif
 %endmacro
@@ -2538 +2539 @@
 ; The 8-bit function only operates on AX, so it takes no DX pointer. The other
 ; functions takes a pointer to rAX in A0, rDX in A1, the operand in A2 and a
-; pointer to eflags in A3.
-;
-; The functions all return 0 so the caller can be used for div/idiv as well as
-; for the mul/imul implementation.
+; incoming eflags in A3.
+;
+; The functions all return eflags. Since valid eflags can't ever be zero, we can
+; use the same macros/tests framework as div/idiv.
 ;
 ; @param        1       The instruction mnemonic.
@@ -2555 +2556 @@
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8 %+ %4, 12
         PROLOGUE_3_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD A2, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A2_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     al, [A0]
         %1      A1_8
         mov     [A0], ax
 %if %5 != 1
-        IEM_SAVE_FLAGS_OLD A2, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A2_32, %2, %3, 0
 %else
-        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_OLD A2, %2, X86_EFL_AF | X86_EFL_ZF, ax, 8, xAX ; intel
+        movzx   edx, ax
+        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_RETVAL A2_32, %2, X86_EFL_AF | X86_EFL_ZF, dx, 8, xDX ; intel
 %endif
-        xor     eax, eax
         EPILOGUE_3_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u8 %+ %4
@@ -2570 +2571 @@
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16 %+ %4, 16
         PROLOGUE_4_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     ax, [A0]
 %ifdef ASM_CALL64_GCC
@@ -2583 +2584 @@
 %endif
 %if %5 != 1
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %else
-        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_OLD A3, %2, X86_EFL_AF | X86_EFL_ZF, ax, 16, xAX ; intel
+        movzx   edx, ax
+        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_RETVAL A3_32, %2, X86_EFL_AF | X86_EFL_ZF, dx, 16, xDX ; intel
 %endif
-        xor     eax, eax
         EPILOGUE_4_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u16 %+ %4
@@ -2593 +2594 @@
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32 %+ %4, 16
         PROLOGUE_4_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     eax, [A0]
 %ifdef ASM_CALL64_GCC
@@ -2606 +2607 @@
 %endif
 %if %5 != 1
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %else
-        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_OLD A3, %2, X86_EFL_AF | X86_EFL_ZF, eax, 32, xAX ; intel
+        mov     edx, eax
+        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_RETVAL A3_32, %2, X86_EFL_AF | X86_EFL_ZF, edx, 32, xDX ; intel
 %endif
-        xor     eax, eax
         EPILOGUE_4_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u32 %+ %4
@@ -2617 +2618 @@
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64 %+ %4, 20
         PROLOGUE_4_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     rax, [A0]
 %ifdef ASM_CALL64_GCC
@@ -2630 +2631 @@
 %endif
 %if %5 != 1
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %else
-        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_OLD A3, %2, X86_EFL_AF | X86_EFL_ZF, rax, 64, xAX ; intel
+        mov     T2, rax
+        IEM_SAVE_FLAGS_ADJUST_AND_CALC_SF_PF_RETVAL A3_32, %2, X86_EFL_AF | X86_EFL_ZF, T2, 64, T2 ; intel
 %endif
-        xor     eax, eax
         EPILOGUE_4_ARGS_EX 12
 ENDPROC iemAImpl_ %+ %1 %+ _u64 %+ %4
 %endif ; !RT_ARCH_AMD64

 %endmacro
@@ -2688 +2688 @@
 ;
 ; The 8-bit function only operates on AX, so it takes no DX pointer. The other
-; functions takes a pointer to rAX in A0, rDX in A1, the operand in A2 and a
-; pointer to eflags in A3.
-;
-; The functions all return 0 on success and -1 if a divide error should be
-; raised by the caller.
+; functions takes a pointer to rAX in A0, rDX in A1, the operand in A2 and
+; incoming eflags in A3.
+;
+; The functions returns the new EFLAGS on success and zero on divide error.
+; The new EFLAGS value can never be zero, given that bit 1 always set.
 ;
 ; @param        1       The instruction mnemonic.
@@ -2751 +2751 @@
 %endif

-        IEM_MAYBE_LOAD_FLAGS_OLD A2, %2, %3, %3 ; Undefined flags may be passed thru (Intel)
+        IEM_MAYBE_LOAD_FLAGS A2_32, %2, %3, %3 ; Undefined flags may be passed thru (Intel)
         mov     ax, [A0]
         %1      A1_8
         mov     [A0], ax
 %if %6 == 2 ; AMD64 3990X: Set AF and clear PF, ZF and SF.
-        IEM_ADJUST_FLAGS_OLD A2, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
+        IEM_ADJUST_FLAGS_RETVAL A2_32, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
 %else
-        IEM_SAVE_FLAGS_OLD A2, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A2_32, %2, %3, 0
 %endif
-        xor     eax, eax
-
 .return:
         EPILOGUE_3_ARGS
@@ -2767 +2765 @@
 .div_zero:
 .div_overflow:
-        mov     eax, -1
+        xor     eax, eax
         jmp     .return
 ENDPROC iemAImpl_ %+ %1 %+ _u8 %+ %5
@@ -2817 +2815 @@
 %endif

-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
 %ifdef ASM_CALL64_GCC
         mov     T1, A2
@@ -2834 +2832 @@
 %endif
 %if %6 == 2 ; AMD64 3990X: Set AF and clear PF, ZF and SF.
-        IEM_ADJUST_FLAGS_OLD A3, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
+        IEM_ADJUST_FLAGS_RETVAL A3_32, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
 %else
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %endif
-        xor     eax, eax

 .return:
@@ -2845 +2842 @@
 .div_zero:
 .div_overflow:
-        mov     eax, -1
+        xor     eax, eax
         jmp     .return
 ENDPROC iemAImpl_ %+ %1 %+ _u16 %+ %5
@@ -2903 +2900 @@
 %endif

-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     eax, [A0]
 %ifdef ASM_CALL64_GCC
@@ -2921 +2918 @@
 %endif
 %if %6 == 2 ; AMD64 3990X: Set AF and clear PF, ZF and SF.
-        IEM_ADJUST_FLAGS_OLD A3, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
+        IEM_ADJUST_FLAGS_RETVAL A3_32, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
 %else
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %endif
-        xor     eax, eax

 .return:
@@ -2935 +2931 @@
 %endif
 .div_zero:
-        mov     eax, -1
+        xor     eax, eax
         jmp     .return
 ENDPROC iemAImpl_ %+ %1 %+ _u32 %+ %5
@@ -2991 +2987 @@
 %endif

-        IEM_MAYBE_LOAD_FLAGS_OLD A3, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
+        IEM_MAYBE_LOAD_FLAGS A3_32, %2, %3, %3 ; Undefined flags may be passed thru (AMD)
         mov     rax, [A0]
 %ifdef ASM_CALL64_GCC
@@ -3009 +3005 @@
 %endif
 %if %6 == 2 ; AMD64 3990X: Set AF and clear PF, ZF and SF.
-        IEM_ADJUST_FLAGS_OLD A3, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
+        IEM_ADJUST_FLAGS_RETVAL A3_32, X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF, X86_EFL_AF
 %else
-        IEM_SAVE_FLAGS_OLD A3, %2, %3, 0
+        IEM_SAVE_FLAGS_RETVAL A3_32, %2, %3, 0
 %endif
-        xor     eax, eax

 .return:
@@ -3023 +3018 @@
 %endif
 .div_zero:
-        mov     eax, -1
+        xor     eax, eax
         jmp     .return
 ENDPROC iemAImpl_ %+ %1 %+ _u64 %+ %5
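Taken together, the assembly hunks swap the old by-pointer flag handling for a by-value contract: the flags are loaded into and returned in T0/EAX, the old `xor eax, eax` success return disappears because the flags themselves are now the return value, and the #DE paths return zero instead of -1. A minimal C model of what IEM_ADJUST_FLAGS_RETVAL computes (the real macro is NASM; the function name and harness below are hypothetical stand-ins):

    #include <assert.h>
    #include <stdint.h>

    #define X86_EFL_1  UINT32_C(0x00000002) /* reserved bit 1, always set in real EFLAGS */
    #define X86_EFL_PF UINT32_C(0x00000004)
    #define X86_EFL_AF UINT32_C(0x00000010)
    #define X86_EFL_ZF UINT32_C(0x00000040)
    #define X86_EFL_SF UINT32_C(0x00000080)

    /* Model of IEM_ADJUST_FLAGS_RETVAL: clear the %2 mask, set the %3 mask,
       and hand the result back as the return value (T0/EAX in the assembly). */
    static uint32_t adjustFlagsRetval(uint32_t fEFlags, uint32_t fClear, uint32_t fSet)
    {
        return (fEFlags & ~fClear) | fSet;
    }

    int main(void)
    {
        /* The AMD div/idiv fixup used above: set AF, clear PF/ZF/SF. */
        uint32_t const fEfl = adjustFlagsRetval(X86_EFL_1 | X86_EFL_ZF,
                                                X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF,
                                                X86_EFL_AF);
        assert(fEfl == (X86_EFL_1 | X86_EFL_AF)); /* bit 1 survives, so success never returns 0 */
        return 0;
    }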
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
--- IEMAllAImplC.cpp (r105462)
+++ IEMAllAImplC.cpp (r106179)
@@ -2475 +2475 @@
  */
 # define EMIT_MUL_INNER(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoadF1, a_fnStore, a_fnMul, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(int,      RT_CONCAT3(iemAImpl_mul_u,a_cBitsWidth,a_Suffix), a_Args) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_mul_u,a_cBitsWidth,a_Suffix), a_Args) \
 { \
     RTUINT ## a_cBitsWidth2x ## U Result; \
@@ -2482 +2482 @@
     \
     /* Calc EFLAGS: */ \
-    uint32_t fEfl = *pfEFlags; \
     if (a_fIntelFlags) \
     { /* Intel: 6700K and 10980XE behavior */ \
-        fEfl &= ~(X86_EFL_SF | X86_EFL_CF | X86_EFL_OF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF); \
+        fEFlags &= ~(X86_EFL_SF | X86_EFL_CF | X86_EFL_OF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF); \
         if (Result.s.Lo & RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_SF; \
-        fEfl |= IEM_EFL_CALC_PARITY(Result.s.Lo); \
+            fEFlags |= X86_EFL_SF; \
+        fEFlags |= IEM_EFL_CALC_PARITY(Result.s.Lo); \
         if (Result.s.Hi != 0) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
     } \
     else \
     { /* AMD: 3990X */ \
         if (Result.s.Hi != 0) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
         else \
-            fEfl &= ~(X86_EFL_CF | X86_EFL_OF); \
+            fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
     } \
-    *pfEFlags = fEfl; \
-    return 0; \
+    return fEFlags; \
 } \
@@ -2509 +2507 @@

 # ifndef DOXYGEN_RUNNING /* this totally confuses doxygen for some reason */
-EMIT_MUL(64, 128, (uint64_t *puA, uint64_t *puD, uint64_t uFactor, uint32_t *pfEFlags), (puA, puD, uFactor, pfEFlags),
+EMIT_MUL(64, 128, (uint64_t *puA, uint64_t *puD, uint64_t uFactor, uint32_t fEFlags), (puA, puD, uFactor, pfEFlags),
          MUL_LOAD_F1, MUL_STORE, MULDIV_MUL_U128)
 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY)
-EMIT_MUL(32, 64, (uint32_t *puA, uint32_t *puD, uint32_t uFactor, uint32_t *pfEFlags), (puA, puD, uFactor, pfEFlags),
+EMIT_MUL(32, 64, (uint32_t *puA, uint32_t *puD, uint32_t uFactor, uint32_t fEFlags), (puA, puD, uFactor, pfEFlags),
          MUL_LOAD_F1, MUL_STORE, MULDIV_MUL)
-EMIT_MUL(16, 32, (uint16_t *puA, uint16_t *puD, uint16_t uFactor, uint32_t *pfEFlags), (puA, puD, uFactor, pfEFlags),
+EMIT_MUL(16, 32, (uint16_t *puA, uint16_t *puD, uint16_t uFactor, uint32_t fEFlags), (puA, puD, uFactor, pfEFlags),
          MUL_LOAD_F1, MUL_STORE, MULDIV_MUL)
-EMIT_MUL(8, 16, (uint16_t *puAX, uint8_t uFactor, uint32_t *pfEFlags), (puAX, uFactor, pfEFlags),
+EMIT_MUL(8, 16, (uint16_t *puAX, uint8_t uFactor, uint32_t fEFlags), (puAX, uFactor, pfEFlags),
          MUL_LOAD_F1_U8, MUL_STORE_U8, MULDIV_MUL)
 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */
@@ -2553 +2551 @@
 # define EMIT_IMUL_INNER(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoadF1, a_fnStore, a_fnNeg, a_fnMul, \
                          a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(int,      RT_CONCAT3(iemAImpl_imul_u,a_cBitsWidth,a_Suffix),a_Args) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_imul_u,a_cBitsWidth,a_Suffix),a_Args) \
 { \
     RTUINT ## a_cBitsWidth2x ## U Result; \
-    uint32_t fEfl = *pfEFlags & ~(X86_EFL_CF | X86_EFL_OF); \
+    fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
     \
     uint ## a_cBitsWidth ## _t const uFactor1 = a_fnLoadF1(); \
@@ -2565 +2563 @@
         a_fnMul(Result, uFactor1, uFactor2, a_cBitsWidth2x); \
         if (Result.s.Hi != 0 || Result.s.Lo >= RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
     } \
     else \
@@ -2572 +2570 @@
         a_fnMul(Result, uFactor1, uPositiveFactor2, a_cBitsWidth2x); \
         if (Result.s.Hi != 0 || Result.s.Lo > RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
         a_fnNeg(Result, a_cBitsWidth2x); \
     } \
@@ -2583 +2581 @@
         a_fnMul(Result, uPositiveFactor1, uFactor2, a_cBitsWidth2x); \
         if (Result.s.Hi != 0 || Result.s.Lo > RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
         a_fnNeg(Result, a_cBitsWidth2x); \
     } \
@@ -2592 +2590 @@
         a_fnMul(Result, uPositiveFactor1, uPositiveFactor2, a_cBitsWidth2x); \
         if (Result.s.Hi != 0 || Result.s.Lo >= RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_CF | X86_EFL_OF; \
+            fEFlags |= X86_EFL_CF | X86_EFL_OF; \
     } \
 } \
@@ -2599 +2597 @@
     if (a_fIntelFlags) \
     { \
-        fEfl &= ~(X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_PF); \
+        fEFlags &= ~(X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_PF); \
         if (Result.s.Lo & RT_BIT_64(a_cBitsWidth - 1)) \
-            fEfl |= X86_EFL_SF; \
-        fEfl |= IEM_EFL_CALC_PARITY(Result.s.Lo & 0xff); \
+            fEFlags |= X86_EFL_SF; \
+        fEFlags |= IEM_EFL_CALC_PARITY(Result.s.Lo & 0xff); \
     } \
-    *pfEFlags = fEfl; \
-    return 0; \
+    return fEFlags; \
 }
 # define EMIT_IMUL(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoadF1, a_fnStore, a_fnNeg, a_fnMul) \
@@ -2614 +2610 @@

 # ifndef DOXYGEN_RUNNING /* this totally confuses doxygen for some reason */
-EMIT_IMUL(64, 128, (uint64_t *puA, uint64_t *puD, uint64_t uFactor2, uint32_t *pfEFlags), (puA, puD, uFactor2, pfEFlags),
+EMIT_IMUL(64, 128, (uint64_t *puA, uint64_t *puD, uint64_t uFactor2, uint32_t fEFlags), (puA, puD, uFactor2, pfEFlags),
           MUL_LOAD_F1, MUL_STORE, MULDIV_NEG_U128, MULDIV_MUL_U128)
 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY)
-EMIT_IMUL(32, 64, (uint32_t *puA, uint32_t *puD, uint32_t uFactor2, uint32_t *pfEFlags), (puA, puD, uFactor2, pfEFlags),
+EMIT_IMUL(32, 64, (uint32_t *puA, uint32_t *puD, uint32_t uFactor2, uint32_t fEFlags), (puA, puD, uFactor2, pfEFlags),
           MUL_LOAD_F1, MUL_STORE, MULDIV_NEG, MULDIV_MUL)
-EMIT_IMUL(16, 32, (uint16_t *puA, uint16_t *puD, uint16_t uFactor2, uint32_t *pfEFlags), (puA, puD, uFactor2, pfEFlags),
+EMIT_IMUL(16, 32, (uint16_t *puA, uint16_t *puD, uint16_t uFactor2, uint32_t fEFlags), (puA, puD, uFactor2, pfEFlags),
           MUL_LOAD_F1, MUL_STORE, MULDIV_NEG, MULDIV_MUL)
-EMIT_IMUL(8, 16, (uint16_t *puAX, uint8_t uFactor2, uint32_t *pfEFlags), (puAX, uFactor2, pfEFlags),
+EMIT_IMUL(8, 16, (uint16_t *puAX, uint8_t uFactor2, uint32_t fEFlags), (puAX, uFactor2, pfEFlags),
           MUL_LOAD_F1_U8, MUL_STORE_U8, MULDIV_NEG, MULDIV_MUL)
 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */
@@ -2634 +2631 @@
 { \
     a_uType uIgn; \
-    iemAImpl_imul_u ## a_cBits(puDst, &uIgn, uSrc, &fEFlags); \
-    return fEFlags; \
+    return iemAImpl_imul_u ## a_cBits(puDst, &uIgn, uSrc, fEFlags); \
 } \
 \
@@ -2641 +2637 @@
 { \
     a_uType uIgn; \
-    iemAImpl_imul_u ## a_cBits ## _intel(puDst, &uIgn, uSrc, &fEFlags); \
-    return fEFlags; \
+    return iemAImpl_imul_u ## a_cBits ## _intel(puDst, &uIgn, uSrc, fEFlags); \
 } \
 \
@@ -2648 +2643 @@
 { \
     a_uType uIgn; \
-    iemAImpl_imul_u ## a_cBits ## _amd(puDst, &uIgn, uSrc, &fEFlags); \
-    return fEFlags; \
+    return iemAImpl_imul_u ## a_cBits ## _amd(puDst, &uIgn, uSrc, fEFlags); \
 }
@@ -2664 +2658 @@
 # define EMIT_DIV_INNER(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoad, a_fnStore, a_fnDivRem, \
                         a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(int,      RT_CONCAT3(iemAImpl_div_u,a_cBitsWidth,a_Suffix),a_Args) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_div_u,a_cBitsWidth,a_Suffix),a_Args) \
 { \
     RTUINT ## a_cBitsWidth2x ## U Dividend; \
@@ -2677 +2671 @@
         /* Calc EFLAGS: Intel 6700K and 10980XE leaves them alone.  AMD 3990X sets AF and clears PF, ZF and SF. */ \
         if (!a_fIntelFlags) \
-            *pfEFlags = (*pfEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
-        return 0; \
+            fEFlags = (fEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
+        return fEFlags; \
     } \
     /* #DE */ \
-    return -1; \
+    return 0; \
 }
 # define EMIT_DIV(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoad, a_fnStore, a_fnDivRem) \
@@ -2689 +2683 @@

 # ifndef DOXYGEN_RUNNING /* this totally confuses doxygen for some reason */
-EMIT_DIV(64,128,(uint64_t *puA, uint64_t *puD, uint64_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_DIV(64,128,(uint64_t *puA, uint64_t *puD, uint64_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
          DIV_LOAD, DIV_STORE, MULDIV_MODDIV_U128)
 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY)
-EMIT_DIV(32,64, (uint32_t *puA, uint32_t *puD, uint32_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_DIV(32,64, (uint32_t *puA, uint32_t *puD, uint32_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
          DIV_LOAD, DIV_STORE, MULDIV_MODDIV)
-EMIT_DIV(16,32, (uint16_t *puA, uint16_t *puD, uint16_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_DIV(16,32, (uint16_t *puA, uint16_t *puD, uint16_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
          DIV_LOAD, DIV_STORE, MULDIV_MODDIV)
-EMIT_DIV(8,16,  (uint16_t *puAX, uint8_t uDivisor, uint32_t *pfEFlags), (puAX, uDivisor, pfEFlags),
+EMIT_DIV(8,16,  (uint16_t *puAX, uint8_t uDivisor, uint32_t fEFlags), (puAX, uDivisor, pfEFlags),
          DIV_LOAD_U8, DIV_STORE_U8, MULDIV_MODDIV)
 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */
@@ -2711 +2705 @@
 # define EMIT_IDIV_INNER(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoad, a_fnStore, a_fnNeg, a_fnDivRem, \
                          a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(int,      RT_CONCAT3(iemAImpl_idiv_u,a_cBitsWidth,a_Suffix),a_Args) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_idiv_u,a_cBitsWidth,a_Suffix),a_Args) \
 { \
     /* Note! Skylake leaves all flags alone. */ \
@@ -2748 +2742 @@
                 a_fnStore(Quotient.s.Lo, Remainder.s.Lo); \
                 if (!a_fIntelFlags) \
-                    *pfEFlags = (*pfEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
-                return 0; \
+                    fEFlags = (fEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
+                return fEFlags; \
             } \
         } \
@@ -2759 +2753 @@
                 a_fnStore(UINT ## a_cBitsWidth ## _C(0) - Quotient.s.Lo, UINT ## a_cBitsWidth ## _C(0) - Remainder.s.Lo); \
                 if (!a_fIntelFlags) \
-                    *pfEFlags = (*pfEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
-                return 0; \
+                    fEFlags = (fEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
+                return fEFlags; \
             } \
         } \
@@ -2773 +2767 @@
                 a_fnStore(UINT ## a_cBitsWidth ## _C(0) - Quotient.s.Lo, Remainder.s.Lo); \
                 if (!a_fIntelFlags) \
-                    *pfEFlags = (*pfEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
-                return 0; \
+                    fEFlags = (fEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
+                return fEFlags; \
             } \
         } \
@@ -2784 +2778 @@
                 a_fnStore(Quotient.s.Lo, UINT ## a_cBitsWidth ## _C(0) - Remainder.s.Lo); \
                 if (!a_fIntelFlags) \
-                    *pfEFlags = (*pfEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
-                return 0; \
+                    fEFlags = (fEFlags & ~(X86_EFL_PF | X86_EFL_ZF | X86_EFL_SF)) | X86_EFL_AF; \
+                return fEFlags; \
             } \
         } \
     } \
     /* #DE */ \
-    return -1; \
+    return 0; \
 }
 # define EMIT_IDIV(a_cBitsWidth, a_cBitsWidth2x, a_Args, a_CallArgs, a_fnLoad, a_fnStore, a_fnNeg, a_fnDivRem) \
@@ -2801 +2795 @@

 # ifndef DOXYGEN_RUNNING /* this totally confuses doxygen for some reason */
-EMIT_IDIV(64,128,(uint64_t *puA, uint64_t *puD, uint64_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_IDIV(64,128,(uint64_t *puA, uint64_t *puD, uint64_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
           DIV_LOAD, DIV_STORE, MULDIV_NEG_U128, MULDIV_MODDIV_U128)
 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY)
-EMIT_IDIV(32,64,(uint32_t *puA, uint32_t *puD, uint32_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_IDIV(32,64,(uint32_t *puA, uint32_t *puD, uint32_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
           DIV_LOAD, DIV_STORE, MULDIV_NEG, MULDIV_MODDIV)
-EMIT_IDIV(16,32,(uint16_t *puA, uint16_t *puD, uint16_t uDivisor, uint32_t *pfEFlags), (puA, puD, uDivisor, pfEFlags),
+EMIT_IDIV(16,32,(uint16_t *puA, uint16_t *puD, uint16_t uDivisor, uint32_t fEFlags), (puA, puD, uDivisor, pfEFlags),
           DIV_LOAD, DIV_STORE, MULDIV_NEG, MULDIV_MODDIV)
-EMIT_IDIV(8,16,(uint16_t *puAX, uint8_t uDivisor, uint32_t *pfEFlags), (puAX, uDivisor, pfEFlags),
+EMIT_IDIV(8,16,(uint16_t *puAX, uint8_t uDivisor, uint32_t fEFlags), (puAX, uDivisor, pfEFlags),
           DIV_LOAD_U8, DIV_STORE_U8, MULDIV_NEG, MULDIV_MODDIV)
 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */
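The C fallback implementations spell the same contract out: success returns the (possibly adjusted) incoming fEFlags, a divide error returns 0, and 0 can never collide with a real flags value because EFLAGS bit 1 is architecturally fixed at one. A self-contained sketch of the 32-bit unsigned divide on the Intel flag path (divU32Intel is a hypothetical stand-in for the macro-generated iemAImpl_div_u32_intel):

    #include <stdint.h>

    /* Returns the new EFLAGS on success, 0 when the caller must raise #DE. */
    static uint32_t divU32Intel(uint32_t *puEAX, uint32_t *puEDX, uint32_t uDivisor, uint32_t fEFlags)
    {
        uint64_t const uDividend = ((uint64_t)*puEDX << 32) | *puEAX;
        if (uDivisor != 0 && uDividend / uDivisor <= UINT32_MAX)
        {
            *puEAX = (uint32_t)(uDividend / uDivisor);  /* quotient  */
            *puEDX = (uint32_t)(uDividend % uDivisor);  /* remainder */
            return fEFlags; /* Intel 6700K/10980XE: flags left alone */
        }
        return 0;           /* divide by zero or quotient overflow   */
    }

    int main(void)
    {
        uint32_t uEax = 17, uEdx = 0;
        uint32_t const fEfl = divU32Intel(&uEax, &uEdx, 5, UINT32_C(0x202));
        return (fEfl != 0 && uEax == 3 && uEdx == 2) ? 0 : 1;
    }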
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
--- IEMAllInstOneByte.cpp.h (r106061)
+++ IEMAllInstOneByte.cpp.h (r106179)
@@ -14564 +14564 @@
     IEM_MC_BEGIN(0, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
-    IEM_MC_ARG(uint8_t,     u8Value,    1); \
-    IEM_MC_ARG(uint32_t *,  pEFlags,    2); \
-    \
-    IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_AIMPL_3(int32_t, rc, pfnU8, pu16AX, u8Value, pEFlags); \
-    IEM_MC_IF_LOCAL_IS_Z(rc) { \
-        IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-    } IEM_MC_ELSE() { \
-        IEM_MC_RAISE_DIVIDE_ERROR(); \
-    } IEM_MC_ENDIF(); \
-    \
+    IEM_MC_ARG(uint8_t,     u8Value,    1); \
+    IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+    IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
+    IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
+    IEM_MC_ARG_EFLAGS(      fEFlagsIn,  2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pfnU8, pu16AX, u8Value, fEFlagsIn); \
+    \
+    IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+    IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
 } \
@@ -14584 +14580 @@
     /* memory access. */ \
     IEM_MC_BEGIN(0, 0); \
-    IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
-    IEM_MC_ARG(uint8_t,     u8Value,    1); \
-    IEM_MC_ARG(uint32_t *,  pEFlags,    2); \
     IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
-    \
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+    \
+    IEM_MC_ARG(uint8_t,     u8Value,    1); \
     IEM_MC_FETCH_MEM_U8(u8Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+    IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
     IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_AIMPL_3(int32_t, rc, pfnU8, pu16AX, u8Value, pEFlags); \
-    IEM_MC_IF_LOCAL_IS_Z(rc) { \
-        IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-    } IEM_MC_ELSE() { \
-        IEM_MC_RAISE_DIVIDE_ERROR(); \
-    } IEM_MC_ENDIF(); \
+    IEM_MC_ARG_EFLAGS(      fEFlagsIn,  2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pfnU8, pu16AX, u8Value, fEFlagsIn); \
     \
+    IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+    IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
 } (void)0
@@ -14616 +14609 @@
             IEM_MC_BEGIN(0, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-            IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
-            IEM_MC_ARG(uint16_t *,  pu16DX,     1); \
-            IEM_MC_ARG(uint16_t,    u16Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
-            \
-            IEM_MC_FETCH_GREG_U16(u16Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-            IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
-            IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
-            \
+            IEM_MC_ARG(uint16_t,    u16Value,   2); \
+            IEM_MC_FETCH_GREG_U16(u16Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+            IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
+            IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint16_t *,  pu16DX,     1); \
+            IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU16, pu16AX, pu16DX, u16Value, fEFlagsIn); \
+            \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
@@ -14638 +14627 @@
             IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-            IEM_MC_ARG(uint32_t *,  pu32AX,     0); \
-            IEM_MC_ARG(uint32_t *,  pu32DX,     1); \
-            IEM_MC_ARG(uint32_t,    u32Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
-            \
-            IEM_MC_FETCH_GREG_U32(u32Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-            IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); \
-            IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX); \
-                IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xDX); \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
-            \
+            IEM_MC_ARG(uint32_t,    u32Value,   2); \
+            IEM_MC_FETCH_GREG_U32(u32Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+            IEM_MC_ARG(uint32_t *,  pu32AX,     0); \
+            IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint32_t *,  pu32DX,     1); \
+            IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU32, pu32AX, pu32DX, u32Value, fEFlagsIn); \
+            \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX); \
+            IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xDX); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
@@ -14662 +14647 @@
             IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-            IEM_MC_ARG(uint64_t *,  pu64AX,     0); \
-            IEM_MC_ARG(uint64_t *,  pu64DX,     1); \
-            IEM_MC_ARG(uint64_t,    u64Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
-            \
-            IEM_MC_FETCH_GREG_U64(u64Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-            IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); \
-            IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
-            \
+            IEM_MC_ARG(uint64_t,    u64Value,   2); \
+            IEM_MC_FETCH_GREG_U64(u64Value, IEM_GET_MODRM_RM(pVCpu, bRm)); \
+            IEM_MC_ARG(uint64_t *,  pu64AX,     0); \
+            IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint64_t *,  pu64DX,     1); \
+            IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU64, pu64AX, pu64DX, u64Value, fEFlagsIn); \
+            \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
@@ -14691 +14672 @@
         case IEMMODE_16BIT: \
             IEM_MC_BEGIN(0, 0); \
-            IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
-            IEM_MC_ARG(uint16_t *,  pu16DX,     1); \
-            IEM_MC_ARG(uint16_t,    u16Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
             IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
-            \
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+            \
+            IEM_MC_ARG(uint16_t,    u16Value,   2); \
             IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+            IEM_MC_ARG(uint16_t *,  pu16AX,     0); \
             IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint16_t *,  pu16DX,     1); \
             IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU16, pu16AX, pu16DX, u16Value, fEFlagsIn); \
             \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
@@ -14715 +14693 @@
         case IEMMODE_32BIT: \
             IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
-            IEM_MC_ARG(uint32_t *,  pu32AX,     0); \
-            IEM_MC_ARG(uint32_t *,  pu32DX,     1); \
-            IEM_MC_ARG(uint32_t,    u32Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
             IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
-            \
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+            \
+            IEM_MC_ARG(uint32_t,    u32Value,   2); \
             IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+            IEM_MC_ARG(uint32_t *,  pu32AX,     0); \
             IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint32_t *,  pu32DX,     1); \
             IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX); \
-                IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xDX); \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU32, pu32AX, pu32DX, u32Value, fEFlagsIn); \
             \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX); \
+            IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xDX); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
@@ -14741 +14716 @@
         case IEMMODE_64BIT: \
             IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
-            IEM_MC_ARG(uint64_t *,  pu64AX,     0); \
-            IEM_MC_ARG(uint64_t *,  pu64DX,     1); \
-            IEM_MC_ARG(uint64_t,    u64Value,   2); \
-            IEM_MC_ARG(uint32_t *,  pEFlags,    3); \
             IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \
-            \
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
             IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+            \
+            IEM_MC_ARG(uint64_t,    u64Value,   2); \
             IEM_MC_FETCH_MEM_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+            IEM_MC_ARG(uint64_t *,  pu64AX,     0); \
             IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX); \
+            IEM_MC_ARG(uint64_t *,  pu64DX,     1); \
             IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX); \
-            IEM_MC_REF_EFLAGS(pEFlags); \
-            IEM_MC_CALL_AIMPL_4(int32_t, rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags); \
-            IEM_MC_IF_LOCAL_IS_Z(rc) { \
-                IEM_MC_ADVANCE_RIP_AND_FINISH(); \
-            } IEM_MC_ELSE() { \
-                IEM_MC_RAISE_DIVIDE_ERROR(); \
-            } IEM_MC_ENDIF(); \
+            IEM_MC_ARG_EFLAGS(      fEFlagsIn,  3); \
+            IEM_MC_CALL_AIMPL_4(uint32_t, fEFlagsRet, pImpl->pfnU64, pu64AX, pu64DX, u64Value, fEFlagsIn); \
             \
+            IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(fEFlagsRet); \
+            IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
+            IEM_MC_ADVANCE_RIP_AND_FINISH(); \
             IEM_MC_END(); \
             break; \
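All six decoder shapes above reduce to the same straight line: fetch the operands, call the worker, raise #DE if the returned EFLAGS value is zero, otherwise commit the flags and advance RIP. A hypothetical plain-C rendering of the 16-bit register case (the IEM_MC_* statements in the diff are the real interface; every name below is a stand-in):

    #include <stdint.h>

    /* Stand-in for PFNIEMAIMPLMULDIVU16 (see the IEMInternal.h hunks further down). */
    typedef uint32_t (*PFNMULDIVU16)(uint16_t *pu16AX, uint16_t *pu16DX,
                                     uint16_t u16Value, uint32_t fEFlags);

    /* Returns 0 on success, -1 when the decoder must raise #DE. */
    static int emulateMulDivU16(PFNMULDIVU16 pfnU16, uint16_t *pu16AX, uint16_t *pu16DX,
                                uint16_t u16Value, uint32_t *pfEFlags)
    {
        uint32_t const fEFlagsRet = pfnU16(pu16AX, pu16DX, u16Value, *pfEFlags);
        if (fEFlagsRet == 0)       /* IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO */
            return -1;             /* raise #DE; the flags are not committed     */
        *pfEFlags = fEFlagsRet;    /* IEM_MC_COMMIT_EFLAGS                       */
        return 0;                  /* IEM_MC_ADVANCE_RIP_AND_FINISH              */
    }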
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
--- IEMAllInstPython.py (r106097)
+++ IEMAllInstPython.py (r106179)
@@ -3304 +3304 @@
     'IEM_MC_PUSH_U32_SREG':                       (McBlock.parseMcGeneric, True, True, True, ),
     'IEM_MC_PUSH_U64':                            (McBlock.parseMcGeneric, True, True, True, ),
-    'IEM_MC_RAISE_DIVIDE_ERROR':                  (McBlock.parseMcGeneric, True, True, True, ),
+    'IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO': (McBlock.parseMcGeneric, True, True, True, ),
     'IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO':           (McBlock.parseMcGeneric, True, True, False, ),
     'IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED':     (McBlock.parseMcGeneric, True, True, True, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.h
--- IEMAllN8veLiveness.h (r106114)
+++ IEMAllN8veLiveness.h (r106179)
@@ -508 +508 @@
 #define IEM_MC_NO_NATIVE_RECOMPILE()                        NOP()

-#define IEM_MC_RAISE_DIVIDE_ERROR()                         IEM_LIVENESS_MARK_POTENTIAL_CALL()
+#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar)  IEM_LIVENESS_MARK_POTENTIAL_CALL()
 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE()           IEM_LIVENESS_MARK_POTENTIAL_CALL(); IEM_LIVENESS_CR0_INPUT()
 #define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE()      IEM_LIVENESS_MARK_POTENTIAL_CALL(); IEM_LIVENESS_CR0_INPUT()
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
--- IEMAllN8veRecompFuncs.h (r106123)
+++ IEMAllN8veRecompFuncs.h (r106179)
@@ -2922 +2922 @@

-#define IEM_MC_RAISE_DIVIDE_ERROR() \
-    off = iemNativeEmitRaiseDivideError(pReNative, off, pCallEntry->idxInstr)
+#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar) \
+    off = iemNativeEmitRaiseDivideErrorIfLocalIsZero(pReNative, off, a_uVar, pCallEntry->idxInstr)

 /**
- * Emits code to raise a \#DE.
+ * Emits code to raise a \#DE if a local variable is zero.
  *
  * @returns New code buffer offset, UINT32_MAX on failure.
  * @param   pReNative   The native recompile state.
  * @param   off         The code buffer offset.
+ * @param   idxVar      The variable to check. This must be 32-bit (EFLAGS).
  * @param   idxInstr    The current instruction.
  */
 DECL_INLINE_THROW(uint32_t)
-iemNativeEmitRaiseDivideError(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr)
-{
-    /*
-     * Make sure we don't have any outstanding guest register writes as we may
-     */
+iemNativeEmitRaiseDivideErrorIfLocalIsZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar, uint8_t idxInstr)
+{
+    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
+    IEMNATIVE_ASSERT_VAR_SIZE(pReNative, idxVar, sizeof(uint32_t));
+
+    /* Make sure we don't have any outstanding guest register writes as we may. */
     off = iemNativeRegFlushPendingWrites(pReNative, off);

+    /* Set the instruction number if we're counting. */
 #ifdef IEMNATIVE_WITH_INSTRUCTION_COUNTING
     off = iemNativeEmitStoreImmToVCpuU8(pReNative, off, idxInstr, RT_UOFFSETOF(VMCPUCC, iem.s.idxTbCurInstr));
@@ -2947 +2950 @@
 #endif

-    /* raise \#DE exception unconditionally. */
-    return iemNativeEmitTbExit(pReNative, off, kIemNativeLabelType_RaiseDe);
+    /* Do the job we're here for. */
+    uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVar, &off);
+    off = iemNativeEmitTestIfGprIsZeroAndTbExit(pReNative, off, idxVarReg, false /*f64Bit*/, kIemNativeLabelType_RaiseDe);
+    iemNativeVarRegisterRelease(pReNative, idxVar);
+
+    return off;
 }
@@ -2984 +2991 @@

     uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVarEffAddr, &off);
-
     off = iemNativeEmitTestAnyBitsInGprAndTbExitIfAnySet(pReNative, off, idxVarReg, cbAlign - 1,
                                                          kIemNativeLabelType_RaiseGp0);
-
     iemNativeVarRegisterRelease(pReNative, idxVarEffAddr);
+
     return off;
 }
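At runtime the code emitted by iemNativeEmitTestIfGprIsZeroAndTbExit boils down to a compare-and-branch that leaves the translation block through the RaiseDe exit. A rough C model of that behaviour only (the real emitter produces host machine code; the jmp_buf below merely stands in for the TB exit):

    #include <setjmp.h>
    #include <stdint.h>

    static jmp_buf g_RaiseDeExit; /* stands in for the kIemNativeLabelType_RaiseDe TB exit */

    /* Semantic shape of the emitted check: a 32-bit compare against zero
       (f64Bit == false) and a branch out of the translation block. */
    static uint32_t tbCheckDivideResult(uint32_t fEFlagsRet)
    {
        if (fEFlagsRet == 0)
            longjmp(g_RaiseDeExit, 1);
        return fEFlagsRet;
    }

    int main(void)
    {
        if (setjmp(g_RaiseDeExit) == 0)
            tbCheckDivideResult(0); /* takes the RaiseDe exit */
        return 0;
    }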
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
--- IEMAllThrdPython.py (r106090)
+++ IEMAllThrdPython.py (r106179)
@@ -2723 +2723 @@
                   or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                   or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
-                  or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
+                  or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO',)):
                 aoDecoderStmts.pop();
                 if not fIsConditional:
trunk/src/VBox/VMM/include/IEMInternal.h
--- IEMInternal.h (r106125)
+++ IEMInternal.h (r106179)
@@ -2366 +2366 @@
     /** Native recompiler: Number of times status flags calc has been skipped. */
     STAMCOUNTER             StatNativeEflSkippedArithmetic;
+    /** Native recompiler: Number of times status flags calc has been postponed. */
+    STAMCOUNTER             StatNativeEflPostponedArithmetic;
     /** Native recompiler: Total number instructions in this category. */
     STAMCOUNTER             StatNativeEflTotalArithmetic;
@@ -2371 +2373 @@
     /** Native recompiler: Number of times status flags calc has been skipped. */
     STAMCOUNTER             StatNativeEflSkippedLogical;
+    /** Native recompiler: Number of times status flags calc has been postponed. */
+    STAMCOUNTER             StatNativeEflPostponedLogical;
     /** Native recompiler: Total number instructions in this category. */
     STAMCOUNTER             StatNativeEflTotalLogical;
@@ -2376 +2380 @@
     /** Native recompiler: Number of times status flags calc has been skipped. */
     STAMCOUNTER             StatNativeEflSkippedShift;
+    /** Native recompiler: Number of times status flags calc has been postponed. */
+    STAMCOUNTER             StatNativeEflPostponedShift;
     /** Native recompiler: Total number instructions in this category. */
     STAMCOUNTER             StatNativeEflTotalShift;
@@ -2550 +2556 @@

 #ifdef IEM_WITH_TLB_TRACE
-    //uint64_t                au64Padding[0];
+    uint64_t                au64Padding[5];
 #else
-    uint64_t                au64Padding[2];
+    uint64_t                au64Padding[7];
 #endif

@@ -3538 +3544 @@
 /** @name Multiplication and division operations.
  * @{ */
-typedef IEM_DECL_IMPL_TYPE(int,      FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t fEFlags));
 typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
 FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8,  iemAImpl_mul_u8_amd,  iemAImpl_mul_u8_intel;
@@ -3545 +3551 @@
 FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;

-typedef IEM_DECL_IMPL_TYPE(int,      FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t fEFlags));
 typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
 FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16,  iemAImpl_mul_u16_amd,  iemAImpl_mul_u16_intel;
@@ -3552 +3558 @@
 FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;

-typedef IEM_DECL_IMPL_TYPE(int,      FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t fEFlags));
 typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
 FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32,  iemAImpl_mul_u32_amd,  iemAImpl_mul_u32_intel;
@@ -3559 +3565 @@
 FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;

-typedef IEM_DECL_IMPL_TYPE(int,      FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t fEFlags));
 typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
 FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
trunk/src/VBox/VMM/include/IEMMc.h
--- IEMMc.h (r106097)
+++ IEMMc.h (r106179)
@@ -109 +109 @@

-#define IEM_MC_RAISE_DIVIDE_ERROR()                     return iemRaiseDivideError(pVCpu)
+#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar) \
+    do { \
+        if (RT_LIKELY((a_uVar) != 0)) \
+        { /* probable */ } \
+        else return iemRaiseDivideError(pVCpu); \
+    } while (0)
 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
     do { \
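The interpreted-mode macro leans on the invariant used throughout this changeset: EFLAGS bit 1 reads as one, so zero can never be a legitimate flags value and the single RT_LIKELY-hinted test suffices. A tiny demonstration of that invariant (X86_EFL_RA1_MASK is assumed to match VBox's name for the always-one bit; the harness is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    #define X86_EFL_RA1_MASK UINT32_C(0x00000002) /* EFLAGS bit 1: reserved, reads as 1 */

    /* Any flags value the emulator passes around keeps bit 1 set. */
    static uint32_t sanitizeEFlags(uint32_t fEFlags)
    {
        return fEFlags | X86_EFL_RA1_MASK;
    }

    int main(void)
    {
        assert(sanitizeEFlags(0) != 0); /* even "no flags set" is non-zero */
        assert((sanitizeEFlags(UINT32_C(0x8d5)) & X86_EFL_RA1_MASK) != 0);
        return 0;
    }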
trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp
--- tstIEMAImpl.cpp (r105275)
+++ tstIEMAImpl.cpp (r106179)
@@ -2991 +2991 @@
         MULDIVU8_TEST_T Test;
         Test.fEflIn    = RandEFlags();
-        Test.fEflOut   = Test.fEflIn;
         Test.uDstIn    = RandU16Dst(iTest);
         Test.uDstOut   = Test.uDstIn;
         Test.uSrcIn    = RandU8Src(iTest);
-        Test.rc        = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
+        uint32_t const fEflRet = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.fEflIn);
+        Test.fEflOut   = fEflRet ? fEflRet : Test.fEflIn;
+        Test.rc        = fEflRet ? 0 : -1;
         GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
     }
@@ -3003 +3004 @@
         Test.fEflIn    = g_aMulDivU8[iFn].paFixedTests[iTest].fEflIn == UINT32_MAX ? RandEFlags()
                        : g_aMulDivU8[iFn].paFixedTests[iTest].fEflIn;
-        Test.fEflOut   = Test.fEflIn;
         Test.uDstIn    = g_aMulDivU8[iFn].paFixedTests[iTest].uDstIn;
         Test.uDstOut   = Test.uDstIn;
         Test.uSrcIn    = g_aMulDivU8[iFn].paFixedTests[iTest].uSrcIn;
-        Test.rc        = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
+        uint32_t const fEflRet = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.fEflIn);
+        Test.fEflOut   = fEflRet ? fEflRet : Test.fEflIn;
+        Test.rc        = fEflRet ? 0 : -1;
         if (g_aMulDivU8[iFn].paFixedTests[iTest].rc == 0 || g_aMulDivU8[iFn].paFixedTests[iTest].rc == -1)
             Test.rc = g_aMulDivU8[iFn].paFixedTests[iTest].rc;
@@ -3022 +3024 @@
     uint32_t const fEflIn  = pEntry->fEflIn;
     uint16_t const uDstIn  = pEntry->uDstIn;
     uint8_t  const uSrcIn  = pEntry->uSrcIn;
     cIterations /= 4;
     RTThreadYield();
     uint64_t const nsStart = RTTimeNanoTS();
     for (uint32_t i = 0; i < cIterations; i++)
     {
-        uint32_t fBenchEfl = fEflIn;
         uint16_t uBenchDst = uDstIn;
-        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
-
-        fBenchEfl = fEflIn;
+        pfn(&uBenchDst, uSrcIn, fEflIn);
+
         uBenchDst = uDstIn;
-        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
-
-        fBenchEfl = fEflIn;
+        pfn(&uBenchDst, uSrcIn, fEflIn);
+
         uBenchDst = uDstIn;
-        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
-
-        fBenchEfl = fEflIn;
+        pfn(&uBenchDst, uSrcIn, fEflIn);
+
         uBenchDst = uDstIn;
-        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
+        pfn(&uBenchDst, uSrcIn, fEflIn);
     }
     return RTTimeNanoTS() - nsStart;
@@ -3063 +3061 @@
     for (uint32_t iTest = 0; iTest < cTests; iTest++ )
     {
-        uint32_t fEfl = paTests[iTest].fEflIn;
-        uint16_t uDst = paTests[iTest].uDstIn;
-        int      rc   = g_aMulDivU8[iFn].pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
+        uint16_t uDst = paTests[iTest].uDstIn;
+        uint32_t fEfl = pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].fEflIn);
+        int      rc   = fEfl ? 0 : -1;
+        fEfl = fEfl ? fEfl : paTests[iTest].fEflIn;
         if (   uDst != paTests[iTest].uDstOut
             || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
@@ -3078 +3077 @@
         else
         {
-            *g_pu16  = paTests[iTest].uDstIn;
-            *g_pfEfl = paTests[iTest].fEflIn;
-            rc = g_aMulDivU8[iFn].pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
-            RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
-            RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
-            RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
+            *g_pu16 = paTests[iTest].uDstIn;
+            fEfl = pfn(g_pu16, paTests[iTest].uSrcIn, paTests[iTest].fEflIn);
+            rc   = fEfl ? 0 : -1;
+            fEfl = fEfl ? fEfl : paTests[iTest].fEflIn;
+            RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
+            RTTEST_CHECK(g_hTest, (fEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
+            RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
         }
     }
@@ -3160 +3160 @@
     { \
         Test.fEflIn    = RandEFlags(); \
-        Test.fEflOut   = Test.fEflIn; \
         Test.uDst1In   = RandU ## a_cBits ## Dst(iTest); \
         Test.uDst1Out  = Test.uDst1In; \
         Test.uDst2In   = RandU ## a_cBits ## Dst(iTest); \
        Test.uDst2Out  = Test.uDst2In; \
         Test.uSrcIn    = RandU ## a_cBits ## Src(iTest); \
-        Test.rc        = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
+        uint32_t const fEflRet = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, Test.fEflIn); \
+        Test.fEflOut   = fEflRet ? fEflRet : Test.fEflIn; \
+        Test.rc        = fEflRet ? 0 : -1; \
         GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
     } \
@@ -3173 +3174 @@
         Test.fEflIn    = a_aSubTests[iFn].paFixedTests[iTest].fEflIn == UINT32_MAX ? RandEFlags() \
                        : a_aSubTests[iFn].paFixedTests[iTest].fEflIn; \
-        Test.fEflOut   = Test.fEflIn; \
         Test.uDst1In   = a_aSubTests[iFn].paFixedTests[iTest].uDst1In; \
         Test.uDst1Out  = Test.uDst1In; \
@@ -3179 +3179 @@
         Test.uDst2Out  = Test.uDst2In; \
         Test.uSrcIn    = a_aSubTests[iFn].paFixedTests[iTest].uSrcIn; \
-        Test.rc        = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
+        uint32_t const fEflRet = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, Test.fEflIn); \
+        Test.fEflOut   = fEflRet ? fEflRet : Test.fEflIn; \
+        Test.rc        = fEflRet ? 0 : -1; \
         if (a_aSubTests[iFn].paFixedTests[iTest].rc == 0 || a_aSubTests[iFn].paFixedTests[iTest].rc == -1) \
             Test.rc = a_aSubTests[iFn].paFixedTests[iTest].rc; \
@@ -3216 +3218 @@
     cIterations /= 4; \
     RTThreadYield(); \
     uint64_t const nsStart = RTTimeNanoTS(); \
     for (uint32_t i = 0; i < cIterations; i++) \
     { \
-        uint32_t fBenchEfl  = fEflIn; \
         a_uType  uBenchDst1 = uDst1In; \
         a_uType  uBenchDst2 = uDst2In; \
-        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, fEflIn); \
         \
-        fBenchEfl  = fEflIn; \
         uBenchDst1 = uDst1In; \
         uBenchDst2 = uDst2In; \
-        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, fEflIn); \
         \
-        fBenchEfl  = fEflIn; \
         uBenchDst1 = uDst1In; \
         uBenchDst2 = uDst2In; \
-        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, fEflIn); \
         \
-        fBenchEfl  = fEflIn; \
         uBenchDst1 = uDst1In; \
         uBenchDst2 = uDst2In; \
-        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+        pfn(&uBenchDst1, &uBenchDst2, uSrcIn, fEflIn); \
     } \
     return RTTimeNanoTS() - nsStart; \
@@ -3258 +3256 @@
     for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
     { \
-        uint32_t fEfl  = paTests[iTest].fEflIn; \
         a_uType  uDst1 = paTests[iTest].uDst1In; \
         a_uType  uDst2 = paTests[iTest].uDst2In; \
-        int      rc    = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
+        uint32_t fEfl  = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, paTests[iTest].fEflIn); \
+        int      rc    = fEfl ? 0 : -1; \
+        fEfl = fEfl ? fEfl : paTests[iTest].fEflIn; \
         if (   uDst1 != paTests[iTest].uDst1Out \
             || uDst2 != paTests[iTest].uDst2Out \
@@ -3280 +3279 @@
             *g_pu ## a_cBits        = paTests[iTest].uDst1In; \
             *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
-            *g_pfEfl = paTests[iTest].fEflIn; \
-            rc = pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
+            fEfl = pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, paTests[iTest].fEflIn); \
+            rc   = fEfl ? 0 : -1; \
+            fEfl = fEfl ? fEfl : paTests[iTest].fEflIn; \
             RTTEST_CHECK(g_hTest, *g_pu ## a_cBits        == paTests[iTest].uDst1Out); \
             RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
-            RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
+            RTTEST_CHECK(g_hTest, (fEfl | fEflIgn)     == (paTests[iTest].fEflOut | fEflIgn)); \
             RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
         } \
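On the test side the single return value has to be folded back into the legacy record layout, which stores fEflOut and rc as separate fields in the binary test files. A self-contained sketch of that mapping (TESTREC and recordResult are hypothetical; the real code does this inline on MULDIVU8_TEST_T and friends):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint32_t fEflIn, fEflOut; int rc; } TESTREC;

    /* Fold the single return value back into the legacy (fEflOut, rc) pair. */
    static void recordResult(TESTREC *pTest, uint32_t fEflRet)
    {
        pTest->fEflOut = fEflRet ? fEflRet : pTest->fEflIn; /* #DE leaves the flags as they were */
        pTest->rc      = fEflRet ? 0 : -1;                  /* keep the old on-disk encoding     */
    }

    int main(void)
    {
        TESTREC Test = { UINT32_C(0x202), 0, 0 };
        recordResult(&Test, 0);                /* divide error */
        assert(Test.rc == -1 && Test.fEflOut == UINT32_C(0x202));
        recordResult(&Test, UINT32_C(0x246));  /* success      */
        assert(Test.rc == 0  && Test.fEflOut == UINT32_C(0x246));
        return 0;
    }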
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
--- tstIEMCheckMc.cpp (r106097)
+++ tstIEMCheckMc.cpp (r106179)
@@ -618 +618 @@
 #define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP)          do { (void)fMcBegin; CHK_TYPE(uint64_t, a_u64NewIP); return VINF_SUCCESS; } while (0)
 #define IEM_MC_RETN_AND_FINISH(a_u16Pop)                    do { (void)fMcBegin; return VINF_SUCCESS; } while (0)
-#define IEM_MC_RAISE_DIVIDE_ERROR()                         do { (void)fMcBegin; return VERR_TRPM_ACTIVE_TRAP; } while (0)
+#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar)  do { (void)fMcBegin; CHK_VAR(a_uVar); if (a_uVar == 0) return VERR_TRPM_ACTIVE_TRAP; } while (0)
 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE()           do { (void)fMcBegin; } while (0)
 #define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE()      do { (void)fMcBegin; } while (0)