Changeset 104051 in vbox for trunk/src/VBox/VMM/testcase
- Timestamp: Mar 26, 2024, 2:10:26 AM
- svn:sync-xref-src-repo-rev: 162442
- Files: 1 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, space-prefixed lines are unmodified context; '…' marks elided lines.
trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp (r103100 → r104051)
 }

+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
+
+static uint64_t CmpXchg8bBench(uint32_t cIterations, FNIEMAIMPLCMPXCHG8B *pfn, uint64_t const uDstValue,
+                               uint64_t const uOldValue, uint64_t const uNewValue, uint32_t const fEflIn)
+{
+    cIterations /= 4;
+    RTThreadYield();
+    uint64_t const nsStart = RTTimeNanoTS();
+    for (uint32_t i = 0; i < cIterations; i++)
+    {
+        RTUINT64U uA, uB;
+        uint32_t  fEfl = fEflIn;
+        uint64_t  uDst = uDstValue;
+        uB.u = uNewValue;
+        uA.u = uOldValue;
+        pfn(&uDst, &uA, &uB, &fEfl);
+
+        fEfl = fEflIn;
+        uDst = uDstValue;
+        uB.u = uNewValue;
+        uA.u = uOldValue;
+        pfn(&uDst, &uA, &uB, &fEfl);
+
+        fEfl = fEflIn;
+        uDst = uDstValue;
+        uB.u = uNewValue;
+        uA.u = uOldValue;
+        pfn(&uDst, &uA, &uB, &fEfl);
+
+        fEfl = fEflIn;
+        uDst = uDstValue;
+        uB.u = uNewValue;
+        uA.u = uOldValue;
+        pfn(&uDst, &uA, &uB, &fEfl);
+    }
+    return RTTimeNanoTS() - nsStart;
+}
+
 static void CmpXchg8bTest(void)
 {
-    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
     static struct
     {
…
                      (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
         RTTEST_CHECK(g_hTest, uB.u == uNewValue);
+
+        if (iTest == 2 && g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0)
+        {
+            uint32_t cIterations = EstimateIterations(_64K, CmpXchg8bBench(_64K, s_aFuncs[iFn].pfn,
+                                                                           uOldValue, uOldValue, uNewValue, fEflIn));
+            uint64_t cNsRealRun  = CmpXchg8bBench(cIterations, s_aFuncs[iFn].pfn, uOldValue, uOldValue, uNewValue, fEflIn);
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL,
+                         "%s-positive", s_aFuncs[iFn].pszName);
+
+            cIterations = EstimateIterations(_64K, CmpXchg8bBench(_64K, s_aFuncs[iFn].pfn,
+                                                                  ~uOldValue, uOldValue, uNewValue, fEflIn));
+            cNsRealRun  = CmpXchg8bBench(cIterations, s_aFuncs[iFn].pfn, ~uOldValue, uOldValue, uNewValue, fEflIn);
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL,
+                         "%s-negative", s_aFuncs[iFn].pszName);
+        }
     }
 }
…
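The block above is a calibrate-then-measure pattern: a fixed 64K-call probe is timed first, EstimateIterations scales that probe up to the requested measurement budget, and only the second, full-length run is reported (separately for the "positive" match case and the "negative" mismatch case of CMPXCHG8B). EstimateIterations itself lives elsewhere in tstIEMAImpl.cpp and is not part of this changeset; the sketch below is merely a plausible reconstruction of such a helper, assuming g_cPicoSecBenchmark holds the requested benchmark time in picoseconds (the variable name comes from the diff, the body is an assumption):

    #include <iprt/cdefs.h>     /* RT_MAX, RT_MIN, _64K, _1G */
    #include <iprt/types.h>

    extern uint64_t g_cPicoSecBenchmark;    /* benchmark budget in picoseconds (name from the diff) */

    /* Hypothetical sketch, NOT the actual tstIEMAImpl.cpp helper: scale the probe
       iteration count so that the measured run lasts roughly g_cPicoSecBenchmark. */
    static uint32_t EstimateIterationsSketch(uint32_t cProbeIterations, uint64_t cNsProbe)
    {
        cNsProbe = RT_MAX(cNsProbe, 1);     /* guard against a zero-length probe reading */
        /* budget[ps] / per-iteration-cost[ps]  ==  budget * iterations / (ns * 1000): */
        uint64_t const cIterations = g_cPicoSecBenchmark * cProbeIterations / (cNsProbe * 1000);
        return (uint32_t)RT_MIN(RT_MAX(cIterations, _64K), _1G);   /* clamp to sane bounds */
    }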
 #endif

-#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
+#define TEST_SHIFT_DBL(a_cBits, a_uType, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
     TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLSHIFTDBLU ## a_cBits); \
     \
…
     GEN_SHIFT_DBL(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
+    \
+    static uint64_t ShiftDblU ## a_cBits ## Bench(uint32_t cIterations, PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn, a_TestType const *pEntry) \
+    { \
+        uint32_t const fEflIn = pEntry->fEflIn; \
+        a_uType const  uDstIn = pEntry->uDstIn; \
+        a_uType const  uSrcIn = pEntry->uSrcIn; \
+        a_uType const  cShift = pEntry->uMisc; \
+        cIterations /= 4; \
+        RTThreadYield(); \
+        uint64_t const nsStart = RTTimeNanoTS(); \
+        for (uint32_t i = 0; i < cIterations; i++) \
+        { \
+            uint32_t fBenchEfl = fEflIn; \
+            a_uType  uBenchDst = uDstIn; \
+            pfn(&uBenchDst, uSrcIn, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, uSrcIn, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, uSrcIn, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, uSrcIn, cShift, &fBenchEfl); \
+        } \
+        return RTTimeNanoTS() - nsStart; \
+    } \
     \
     static void ShiftDblU ## a_cBits ## Test(void) \
…
         { \
             uint32_t fEfl = paTests[iTest].fEflIn; \
-            a_Type   uDst = paTests[iTest].uDstIn; \
+            a_uType  uDst = paTests[iTest].uDstIn; \
             pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
             if (   uDst != paTests[iTest].uDstOut \
…
             } \
         } \
+        \
+        /* Benchmark if all succeeded. */ \
+        if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0) \
+        { \
+            uint32_t const iTest       = cTests / 2; \
+            uint32_t const cIterations = EstimateIterations(_64K, ShiftDblU ## a_cBits ## Bench(_64K, pfn, &paTests[iTest])); \
+            uint64_t const cNsRealRun  = ShiftDblU ## a_cBits ## Bench(cIterations, pfn, &paTests[iTest]); \
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL, \
+                         "%s%s", a_aSubTests[iFn].pszName, iVar ? "-native" : ""); \
+        } \
+        \
+        /* Next variation is native. */ \
         pfn = a_aSubTests[iFn].pfnNative; \
     } \
…
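All the new *Bench helpers in this changeset share one idiom: divide the iteration count by four, unroll the call four times, and reset EFLAGS and the destination operand before every call so that each call does identical work while the loop and timestamp overhead is amortized over four calls. A self-contained illustration of the idiom (MyWorker/BenchWorker are made-up names for this sketch; only RTThreadYield and RTTimeNanoTS are real IPRT calls):

    #include <iprt/thread.h>    /* RTThreadYield */
    #include <iprt/time.h>      /* RTTimeNanoTS */
    #include <iprt/types.h>

    typedef void FNMYWORKER(uint32_t *puDst, uint32_t *pfEfl);  /* stand-in for an IEM helper */

    static uint64_t BenchWorker(uint32_t cIterations, FNMYWORKER *pfn, uint32_t uDstIn, uint32_t fEflIn)
    {
        cIterations /= 4;                   /* the loop body below makes four calls */
        RTThreadYield();                    /* start on as fresh a timeslice as possible */
        uint64_t const nsStart = RTTimeNanoTS();
        for (uint32_t i = 0; i < cIterations; i++)
        {
            uint32_t fEfl = fEflIn;         /* reset inputs so every call is identical */
            uint32_t uDst = uDstIn;
            pfn(&uDst, &fEfl);

            fEfl = fEflIn;  uDst = uDstIn;
            pfn(&uDst, &fEfl);

            fEfl = fEflIn;  uDst = uDstIn;
            pfn(&uDst, &fEfl);

            fEfl = fEflIn;  uDst = uDstIn;
            pfn(&uDst, &fEfl);
        }
        return RTTimeNanoTS() - nsStart;    /* elapsed ns for ~4*cIterations calls */
    }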
 #endif

-#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType) \
+#define TEST_UNARY(a_cBits, a_uType, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
     TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLUNARYU ## a_cBits); \
-    static a_SubTestType g_aUnaryU ## a_cBits[] = \
+    static a_SubTestType a_aSubTests[] = \
     { \
         ENTRY_BIN(inc_u ## a_cBits), \
…
     }; \
     \
-    GEN_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType) \
+    GEN_UNARY(a_cBits, a_uType, a_Fmt, a_TestType, a_SubTestType) \
+    \
+    static uint64_t UnaryU ## a_cBits ## Bench(uint32_t cIterations, PFNIEMAIMPLUNARYU ## a_cBits pfn, a_TestType const *pEntry) \
+    { \
+        uint32_t const fEflIn = pEntry->fEflIn; \
+        a_uType const  uDstIn = pEntry->uDstIn; \
+        cIterations /= 4; \
+        RTThreadYield(); \
+        uint64_t const nsStart = RTTimeNanoTS(); \
+        for (uint32_t i = 0; i < cIterations; i++) \
+        { \
+            uint32_t fBenchEfl = fEflIn; \
+            a_uType  uBenchDst = uDstIn; \
+            pfn(&uBenchDst, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, &fBenchEfl); \
+        } \
+        return RTTimeNanoTS() - nsStart; \
+    } \
     \
     static void UnaryU ## a_cBits ## Test(void) \
     { \
-        for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
+        for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
         { \
-            if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aUnaryU ## a_cBits[iFn])) \
+            if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
                 continue; \
-            a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
-            uint32_t const           cTests  = g_aUnaryU ## a_cBits[iFn].cTests; \
+            PFNIEMAIMPLUNARYU ## a_cBits const pfn     = a_aSubTests[iFn].pfn; \
+            a_TestType const * const           paTests = a_aSubTests[iFn].paTests; \
+            uint32_t const                     cTests  = a_aSubTests[iFn].cTests; \
             if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
             for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
             { \
                 uint32_t fEfl = paTests[iTest].fEflIn; \
-                a_Type   uDst = paTests[iTest].uDstIn; \
-                g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
+                a_uType  uDst = paTests[iTest].uDstIn; \
+                pfn(&uDst, &fEfl); \
                 if (   uDst != paTests[iTest].uDstOut \
                     || fEfl != paTests[iTest].fEflOut) \
…
                     *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                     *g_pfEfl = paTests[iTest].fEflIn; \
-                    g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
+                    pfn(g_pu ## a_cBits, g_pfEfl); \
                     RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                     RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                 } \
             } \
-            FREE_DECOMPRESSED_TESTS(g_aUnaryU ## a_cBits[iFn]); \
+            \
+            if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0) \
+            { \
+                uint32_t const iTest       = cTests / 2; \
+                uint32_t const cIterations = EstimateIterations(_64K, UnaryU ## a_cBits ## Bench(_64K, pfn, &paTests[iTest])); \
+                uint64_t const cNsRealRun  = UnaryU ## a_cBits ## Bench(cIterations, pfn, &paTests[iTest]); \
+                RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL, "%s", a_aSubTests[iFn].pszName); \
+            } \
+            \
+            FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
         } \
     }
-TEST_UNARY(8,  uint8_t,  "%#04RX8",   BINU8_TEST_T,  INT_UNARY_U8_T )
-TEST_UNARY(16, uint16_t, "%#06RX16",  BINU16_TEST_T, INT_UNARY_U16_T )
-TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T, INT_UNARY_U32_T )
-TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T, INT_UNARY_U64_T )
+TEST_UNARY(8,  uint8_t,  "%#04RX8",   BINU8_TEST_T,  INT_UNARY_U8_T,  g_aUnaryU8)
+TEST_UNARY(16, uint16_t, "%#06RX16",  BINU16_TEST_T, INT_UNARY_U16_T, g_aUnaryU16)
+TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T, INT_UNARY_U32_T, g_aUnaryU32)
+TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T, INT_UNARY_U64_T, g_aUnaryU64)

 #ifdef TSTIEMAIMPL_WITH_GENERATOR
…
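The value reported through RTTestValueF is picoseconds per call: cIterations counts individual helper calls (each Bench helper divides it by four and then makes four calls per loop iteration), so nanoseconds times 1000 divided by calls yields ps/call. With illustrative numbers (the figures and the "inc_u8" name here are examples, not measurements from this changeset):

    /* A run of one million calls measured at 2.5 ms: */
    uint64_t const cNsRealRun  = 2500000;                           /* 2.5 ms in nanoseconds */
    uint32_t const cIterations = 1000000;                           /* helper calls made */
    uint64_t const cPsPerCall  = cNsRealRun * 1000 / cIterations;   /* = 2500 ps, i.e. 2.5 ns/call */
    RTTestValueF(g_hTest, cPsPerCall, RTTESTUNIT_PS_PER_CALL, "inc_u8");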
 #endif

-#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
+#define TEST_SHIFT(a_cBits, a_uType, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
     TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLSHIFTU ## a_cBits); \
     static a_SubTestType a_aSubTests[] = \
…
         ENTRY_BIN_INTEL(rcl_u ## a_cBits, X86_EFL_OF), \
         ENTRY_BIN_AMD(  rcr_u ## a_cBits, X86_EFL_OF), \
-        ENTRY_BIN_INTEL(rcr_u ## a_cBits, X86_EFL_OF), \
+        ENTRY_BIN_INTEL(rcr_u ## a_cBits, X86_EFL_OF), \
         ENTRY_BIN_AMD(  shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
         ENTRY_BIN_INTEL(shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
…
     GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
+    \
+    static uint64_t ShiftU ## a_cBits ## Bench(uint32_t cIterations, PFNIEMAIMPLSHIFTU ## a_cBits pfn, a_TestType const *pEntry) \
+    { \
+        uint32_t const fEflIn = pEntry->fEflIn; \
+        a_uType const  uDstIn = pEntry->uDstIn; \
+        a_uType const  cShift = pEntry->uMisc; \
+        cIterations /= 4; \
+        RTThreadYield(); \
+        uint64_t const nsStart = RTTimeNanoTS(); \
+        for (uint32_t i = 0; i < cIterations; i++) \
+        { \
+            uint32_t fBenchEfl = fEflIn; \
+            a_uType  uBenchDst = uDstIn; \
+            pfn(&uBenchDst, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, cShift, &fBenchEfl); \
+            \
+            fBenchEfl = fEflIn; \
+            uBenchDst = uDstIn; \
+            pfn(&uBenchDst, cShift, &fBenchEfl); \
+        } \
+        return RTTimeNanoTS() - nsStart; \
+    } \
     \
     static void ShiftU ## a_cBits ## Test(void) \
…
         { \
             uint32_t fEfl = paTests[iTest].fEflIn; \
-            a_Type   uDst = paTests[iTest].uDstIn; \
+            a_uType  uDst = paTests[iTest].uDstIn; \
             pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
             if (   uDst != paTests[iTest].uDstOut \
…
             } \
         } \
+        \
+        /* Benchmark if all succeeded. */ \
+        if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0) \
+        { \
+            uint32_t const iTest       = cTests / 2; \
+            uint32_t const cIterations = EstimateIterations(_64K, ShiftU ## a_cBits ## Bench(_64K, pfn, &paTests[iTest])); \
+            uint64_t const cNsRealRun  = ShiftU ## a_cBits ## Bench(cIterations, pfn, &paTests[iTest]); \
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL, \
+                         "%s%s", a_aSubTests[iFn].pszName, iVar ? "-native" : ""); \
+        } \
+        \
+        /* Next variation is native. */ \
         pfn = a_aSubTests[iFn].pfnNative; \
     } \
…
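Stripped of the checking details, every *Test body touched by this changeset now follows the same variation loop: run all vectors against the current implementation, benchmark it only if the sub-test is still error-free, then switch to the native variant when COUNT_VARIATIONS reports one. Condensed shape (not literal code from the file; Bench, pszName and pfnNative stand in for the per-test names):

    for (uint32_t iVar = 0; iVar < cVars; iVar++)
    {
        for (uint32_t iTest = 0; iTest < cTests; iTest++)
            /* ... run vector iTest against pfn and check the outputs ... */;

        /* Benchmark only when requested and only if every vector passed. */
        if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0)
        {
            uint32_t const iTest       = cTests / 2;    /* a representative input */
            uint32_t const cIterations = EstimateIterations(_64K, Bench(_64K, pfn, &paTests[iTest]));
            uint64_t const cNsRealRun  = Bench(cIterations, pfn, &paTests[iTest]);
            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL,
                         "%s%s", pszName, iVar ? "-native" : "");  /* iVar 0 = C, 1 = assembly */
        }

        pfn = pfnNative;    /* next variation is native */
    }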
 #endif

+static uint64_t MulDivU8Bench(uint32_t cIterations, PFNIEMAIMPLMULDIVU8 pfn, MULDIVU8_TEST_T const *pEntry)
+{
+    uint32_t const fEflIn = pEntry->fEflIn;
+    uint16_t const uDstIn = pEntry->uDstIn;
+    uint8_t const  uSrcIn = pEntry->uSrcIn;
+    cIterations /= 4;
+    RTThreadYield();
+    uint64_t const nsStart = RTTimeNanoTS();
+    for (uint32_t i = 0; i < cIterations; i++)
+    {
+        uint32_t fBenchEfl = fEflIn;
+        uint16_t uBenchDst = uDstIn;
+        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
+
+        fBenchEfl = fEflIn;
+        uBenchDst = uDstIn;
+        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
+
+        fBenchEfl = fEflIn;
+        uBenchDst = uDstIn;
+        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
+
+        fBenchEfl = fEflIn;
+        uBenchDst = uDstIn;
+        pfn(&uBenchDst, uSrcIn, &fBenchEfl);
+    }
+    return RTTimeNanoTS() - nsStart;
+}
+
 static void MulDivU8Test(void)
 {
     for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
     {
-        if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aMulDivU8[iFn])) \
-            continue; \
+        if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aMulDivU8[iFn]))
+            continue;
         MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
         uint32_t const                cTests  = g_aMulDivU8[iFn].cTests;
         uint32_t const                fEflIgn = g_aMulDivU8[iFn].uExtra;
         PFNIEMAIMPLMULDIVU8           pfn     = g_aMulDivU8[iFn].pfn;
-        uint32_t const                cVars   = COUNT_VARIATIONS(g_aMulDivU8[iFn]); \
+        uint32_t const                cVars   = COUNT_VARIATIONS(g_aMulDivU8[iFn]);
         if (!cTests) RTTestSkipped(g_hTest, "no tests");
         for (uint32_t iVar = 0; iVar < cVars; iVar++)
…
             }
         }
+
+        /* Benchmark if all succeeded. */
+        if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0)
+        {
+            uint32_t const iTest       = cTests / 2;
+            uint32_t const cIterations = EstimateIterations(_64K, MulDivU8Bench(_64K, pfn, &paTests[iTest]));
+            uint64_t const cNsRealRun  = MulDivU8Bench(cIterations, pfn, &paTests[iTest]);
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL,
+                         "%s%s", g_aMulDivU8[iFn].pszName, iVar ? "-native" : "");
+        }
+
+        /* Next variation is native. */
         pfn = g_aMulDivU8[iFn].pfnNative;
     }
-    FREE_DECOMPRESSED_TESTS(g_aMulDivU8[iFn]); \
+    FREE_DECOMPRESSED_TESTS(g_aMulDivU8[iFn]);
     }
 }
…
 #endif

-#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
+#define TEST_MULDIV(a_cBits, a_uType, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
     TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLMULDIVU ## a_cBits); \
     static a_SubTestType a_aSubTests [] = \
…
     GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
+    \
+    static uint64_t MulDivU ## a_cBits ## Bench(uint32_t cIterations, PFNIEMAIMPLMULDIVU ## a_cBits pfn, a_TestType const *pEntry) \
+    { \
+        uint32_t const fEflIn  = pEntry->fEflIn; \
+        a_uType const  uDst1In = pEntry->uDst1In; \
+        a_uType const  uDst2In = pEntry->uDst2In; \
+        a_uType const  uSrcIn  = pEntry->uSrcIn; \
+        cIterations /= 4; \
+        RTThreadYield(); \
+        uint64_t const nsStart = RTTimeNanoTS(); \
+        for (uint32_t i = 0; i < cIterations; i++) \
+        { \
+            uint32_t fBenchEfl  = fEflIn; \
+            a_uType  uBenchDst1 = uDst1In; \
+            a_uType  uBenchDst2 = uDst2In; \
+            pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+            \
+            fBenchEfl  = fEflIn; \
+            uBenchDst1 = uDst1In; \
+            uBenchDst2 = uDst2In; \
+            pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+            \
+            fBenchEfl  = fEflIn; \
+            uBenchDst1 = uDst1In; \
+            uBenchDst2 = uDst2In; \
+            pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+            \
+            fBenchEfl  = fEflIn; \
+            uBenchDst1 = uDst1In; \
+            uBenchDst2 = uDst2In; \
+            pfn(&uBenchDst1, &uBenchDst2, uSrcIn, &fBenchEfl); \
+        } \
+        return RTTimeNanoTS() - nsStart; \
+    } \
     \
     static void MulDivU ## a_cBits ## Test(void) \
…
         { \
             uint32_t fEfl  = paTests[iTest].fEflIn; \
-            a_Type   uDst1 = paTests[iTest].uDst1In; \
-            a_Type   uDst2 = paTests[iTest].uDst2In; \
+            a_uType  uDst1 = paTests[iTest].uDst1In; \
+            a_uType  uDst2 = paTests[iTest].uDst2In; \
             int rc = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
             if (   uDst1 != paTests[iTest].uDst1Out \
…
             } \
         } \
+        \
+        /* Benchmark if all succeeded. */ \
+        if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0) \
+        { \
+            uint32_t const iTest       = cTests / 2; \
+            uint32_t const cIterations = EstimateIterations(_64K, MulDivU ## a_cBits ## Bench(_64K, pfn, &paTests[iTest])); \
+            uint64_t const cNsRealRun  = MulDivU ## a_cBits ## Bench(cIterations, pfn, &paTests[iTest]); \
+            RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL, \
+                         "%s%s", a_aSubTests[iFn].pszName, iVar ? "-native" : ""); \
+        } \
+        \
+        /* Next variation is native. */ \
         pfn = a_aSubTests[iFn].pfnNative; \
     } \
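For readers unfamiliar with the token pasting in these macros, here is roughly what the generated benchmark helper looks like for a 32-bit instantiation. The expansion is illustrative only: the exact TEST_MULDIV invocation and the MULDIVU32_TEST_T type name are assumed by analogy with the MULDIVU8_TEST_T case above, since the invocations fall outside this changeset's hunks.

    /* Illustrative expansion; names assumed by analogy with the u8 case. */
    static uint64_t MulDivU32Bench(uint32_t cIterations, PFNIEMAIMPLMULDIVU32 pfn, MULDIVU32_TEST_T const *pEntry)
    {
        uint32_t const fEflIn  = pEntry->fEflIn;
        uint32_t const uDst1In = pEntry->uDst1In;   /* e.g. the EAX input */
        uint32_t const uDst2In = pEntry->uDst2In;   /* e.g. the EDX input */
        uint32_t const uSrcIn  = pEntry->uSrcIn;
        cIterations /= 4;                           /* four calls per loop iteration */
        /* ... identical 4x-unrolled timing loop as in the macro body above ... */
        return 0; /* placeholder in this sketch; the real body returns elapsed ns */
    }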