VirtualBox

source: vbox/trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp@ 94339

Last change on this file since 94339 was 94339, checked in by vboxsync, 3 years ago

tstIEMAImpl: Separated out the fld/fst related FPU data and how it's generated. Reworked options again. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 117.7 KB
Line 
1/* $Id: tstIEMAImpl.cpp 94339 2022-03-23 14:01:48Z vboxsync $ */
2/** @file
3 * IEM Assembly Instruction Helper Testcase.
4 */
5
6/*
7 * Copyright (C) 2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include "../include/IEMInternal.h"
23
24#include <iprt/errcore.h>
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/ctype.h>
28#include <iprt/getopt.h>
29#include <iprt/initterm.h>
30#include <iprt/message.h>
31#include <iprt/mp.h>
32#include <iprt/rand.h>
33#include <iprt/stream.h>
34#include <iprt/string.h>
35#include <iprt/test.h>
36
37
38/*********************************************************************************************************************************
39* Structures and Typedefs *
40*********************************************************************************************************************************/
41/** @name 8-bit binary (PFNIEMAIMPLBINU8)
42 * @{ */
43typedef struct BINU8_TEST_T
44{
45 uint32_t fEflIn;
46 uint32_t fEflOut;
47 uint8_t uDstIn;
48 uint8_t uDstOut;
49 uint8_t uSrcIn;
50 uint8_t uMisc;
51} BINU8_TEST_T;
52
53typedef struct BINU8_T
54{
55 const char *pszName;
56 PFNIEMAIMPLBINU8 pfn;
57 PFNIEMAIMPLBINU8 pfnNative;
58 BINU8_TEST_T const *paTests;
59 uint32_t cTests;
60 uint32_t uExtra;
61 uint8_t idxCpuEflFlavour;
62} BINU8_T;
63/** @} */
64
65
66/** @name 16-bit binary (PFNIEMAIMPLBINU16)
67 * @{ */
68typedef struct BINU16_TEST_T
69{
70 uint32_t fEflIn;
71 uint32_t fEflOut;
72 uint16_t uDstIn;
73 uint16_t uDstOut;
74 uint16_t uSrcIn;
75 uint16_t uMisc;
76} BINU16_TEST_T;
77
78typedef struct BINU16_T
79{
80 const char *pszName;
81 PFNIEMAIMPLBINU16 pfn;
82 PFNIEMAIMPLBINU16 pfnNative;
83 BINU16_TEST_T const *paTests;
84 uint32_t cTests;
85 uint32_t uExtra;
86 uint8_t idxCpuEflFlavour;
87} BINU16_T;
88/** @} */
89
90
91/** @name 32-bit binary (PFNIEMAIMPLBINU32)
92 * @{ */
93typedef struct BINU32_TEST_T
94{
95 uint32_t fEflIn;
96 uint32_t fEflOut;
97 uint32_t uDstIn;
98 uint32_t uDstOut;
99 uint32_t uSrcIn;
100 uint32_t uMisc;
101} BINU32_TEST_T;
102
103typedef struct BINU32_T
104{
105 const char *pszName;
106 PFNIEMAIMPLBINU32 pfn;
107 PFNIEMAIMPLBINU32 pfnNative;
108 BINU32_TEST_T const *paTests;
109 uint32_t cTests;
110 uint32_t uExtra;
111 uint8_t idxCpuEflFlavour;
112} BINU32_T;
113/** @} */
114
115
116/** @name 64-bit binary (PFNIEMAIMPLBINU64)
117 * @{ */
118typedef struct BINU64_TEST_T
119{
120 uint32_t fEflIn;
121 uint32_t fEflOut;
122 uint64_t uDstIn;
123 uint64_t uDstOut;
124 uint64_t uSrcIn;
125 uint64_t uMisc;
126} BINU64_TEST_T;
127
128typedef struct BINU64_T
129{
130 const char *pszName;
131 PFNIEMAIMPLBINU64 pfn;
132 PFNIEMAIMPLBINU64 pfnNative;
133 BINU64_TEST_T const *paTests;
134 uint32_t cTests;
135 uint32_t uExtra;
136 uint8_t idxCpuEflFlavour;
137} BINU64_T;
138/** @} */
139
140
141/** @name mult/div (PFNIEMAIMPLBINU8, PFNIEMAIMPLBINU16, PFNIEMAIMPLBINU32, PFNIEMAIMPLBINU64)
142 * @{ */
143typedef struct MULDIVU8_TEST_T
144{
145 uint32_t fEflIn;
146 uint32_t fEflOut;
147 uint16_t uDstIn;
148 uint16_t uDstOut;
149 uint8_t uSrcIn;
150 int32_t rc;
151} MULDIVU8_TEST_T;
152
153typedef struct MULDIVU16_TEST_T
154{
155 uint32_t fEflIn;
156 uint32_t fEflOut;
157 uint16_t uDst1In;
158 uint16_t uDst1Out;
159 uint16_t uDst2In;
160 uint16_t uDst2Out;
161 uint16_t uSrcIn;
162 int32_t rc;
163} MULDIVU16_TEST_T;
164
165typedef struct MULDIVU32_TEST_T
166{
167 uint32_t fEflIn;
168 uint32_t fEflOut;
169 uint32_t uDst1In;
170 uint32_t uDst1Out;
171 uint32_t uDst2In;
172 uint32_t uDst2Out;
173 uint32_t uSrcIn;
174 int32_t rc;
175} MULDIVU32_TEST_T;
176
177typedef struct MULDIVU64_TEST_T
178{
179 uint32_t fEflIn;
180 uint32_t fEflOut;
181 uint64_t uDst1In;
182 uint64_t uDst1Out;
183 uint64_t uDst2In;
184 uint64_t uDst2Out;
185 uint64_t uSrcIn;
186 int32_t rc;
187} MULDIVU64_TEST_T;
188/** @} */
189
190
191/*********************************************************************************************************************************
192* Defined Constants And Macros *
193*********************************************************************************************************************************/
194#define ENTRY(a_Name) ENTRY_EX(a_Name, 0)
195#define ENTRY_EX(a_Name, a_uExtra) \
196 { RT_XSTR(a_Name), iemAImpl_ ## a_Name, NULL, \
197 g_aTests_ ## a_Name, RT_ELEMENTS(g_aTests_ ## a_Name), \
198 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
199
200#define ENTRY_INTEL(a_Name, a_fEflUndef) ENTRY_INTEL_EX(a_Name, a_fEflUndef, 0)
201#define ENTRY_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
202 { RT_XSTR(a_Name) "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
203 g_aTests_ ## a_Name ## _intel, RT_ELEMENTS(g_aTests_ ## a_Name ## _intel), \
204 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL }
205
206#define ENTRY_AMD(a_Name, a_fEflUndef) ENTRY_AMD_EX(a_Name, a_fEflUndef, 0)
207#define ENTRY_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
208 { RT_XSTR(a_Name) "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
209 g_aTests_ ## a_Name ## _amd, RT_ELEMENTS(g_aTests_ ## a_Name ## _amd), \
210 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD }
211
212
213/*********************************************************************************************************************************
214* Global Variables *
215*********************************************************************************************************************************/
216static RTTEST g_hTest;
217static uint8_t g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
218#ifdef TSTIEMAIMPL_WITH_GENERATOR
219static uint32_t g_cZeroDstTests = 2;
220static uint32_t g_cZeroSrcTests = 4;
221#endif
222static uint8_t *g_pu8, *g_pu8Two;
223static uint16_t *g_pu16, *g_pu16Two;
224static uint32_t *g_pu32, *g_pu32Two, *g_pfEfl;
225static uint64_t *g_pu64, *g_pu64Two;
226static RTUINT128U *g_pu128, *g_pu128Two;
227
228static char g_aszBuf[16][256];
229static unsigned g_idxBuf = 0;
230
231
232#include "tstIEMAImplData.h"
233#include "tstIEMAImplData-Intel.h"
234#include "tstIEMAImplData-Amd.h"
235
236
237/*********************************************************************************************************************************
238* Internal Functions *
239*********************************************************************************************************************************/
240static const char *FormatR80(PCRTFLOAT80U pr80);
241static const char *FormatR64(PCRTFLOAT64U pr64);
242static const char *FormatR32(PCRTFLOAT32U pr32);
243
244
245/*
246 * Random helpers.
247 */
248
249static uint32_t RandEFlags(void)
250{
251 uint32_t fEfl = RTRandU32();
252 return (fEfl & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK;
253}
254
255
256static uint8_t RandU8(void)
257{
258 return RTRandU32Ex(0, 0xff);
259}
260
261
262static uint16_t RandU16(void)
263{
264 return RTRandU32Ex(0, 0xffff);
265}
266
267
268static uint32_t RandU32(void)
269{
270 return RTRandU32();
271}
272
273
274static uint64_t RandU64(void)
275{
276 return RTRandU64();
277}
278
279
280static RTUINT128U RandU128(void)
281{
282 RTUINT128U Ret;
283 Ret.s.Hi = RTRandU64();
284 Ret.s.Lo = RTRandU64();
285 return Ret;
286}
287
288#ifdef TSTIEMAIMPL_WITH_GENERATOR
289
290static uint8_t RandU8Dst(uint32_t iTest)
291{
292 if (iTest < g_cZeroDstTests)
293 return 0;
294 return RandU8();
295}
296
297
298static uint8_t RandU8Src(uint32_t iTest)
299{
300 if (iTest < g_cZeroSrcTests)
301 return 0;
302 return RandU8();
303}
304
305
306static uint16_t RandU16Dst(uint32_t iTest)
307{
308 if (iTest < g_cZeroDstTests)
309 return 0;
310 return RandU16();
311}
312
313
314static uint16_t RandU16Src(uint32_t iTest)
315{
316 if (iTest < g_cZeroSrcTests)
317 return 0;
318 return RandU16();
319}
320
321
322static uint32_t RandU32Dst(uint32_t iTest)
323{
324 if (iTest < g_cZeroDstTests)
325 return 0;
326 return RandU32();
327}
328
329
330static uint32_t RandU32Src(uint32_t iTest)
331{
332 if (iTest < g_cZeroSrcTests)
333 return 0;
334 return RandU32();
335}
336
337
338static uint64_t RandU64Dst(uint32_t iTest)
339{
340 if (iTest < g_cZeroDstTests)
341 return 0;
342 return RandU64();
343}
344
345
346static uint64_t RandU64Src(uint32_t iTest)
347{
348 if (iTest < g_cZeroSrcTests)
349 return 0;
350 return RandU64();
351}
352
353
354static void SafeR80FractionShift(PRTFLOAT80U pr80, uint8_t cShift)
355{
356 if (pr80->sj64.uFraction >= RT_BIT_64(cShift))
357 pr80->sj64.uFraction >>= cShift;
358 else
359 pr80->sj64.uFraction = (cShift % 19) + 1;
360}
361
362
363static RTFLOAT80U RandR80Ex(unsigned cTarget = 80)
364{
365 Assert(cTarget == 80 || cTarget == 64 || cTarget == 32);
366
367 RTFLOAT80U r80;
368 r80.au64[0] = RandU64();
369 r80.au16[4] = RandU16();
370
371 /*
372 * Make it more likely that we get a good selection of special values.
373 */
374 uint8_t bType = RandU8() & 0x1f;
375 if (bType == 0 || bType == 1 || bType == 2 || bType == 3)
376 {
377 /* Zero (0), Pseudo-Infinity (1), Infinity (2), Indefinite (3). We only keep fSign here. */
378 r80.sj64.uExponent = bType == 0 ? 0 : 0x7fff;
379 r80.sj64.uFraction = bType <= 2 ? 0 : RT_BIT_64(62);
380 r80.sj64.fInteger = bType >= 2 ? 1 : 0;
381 AssertMsg(bType != 0 || RTFLOAT80U_IS_ZERO(&r80), ("%s\n", FormatR80(&r80)));
382 AssertMsg(bType != 1 || RTFLOAT80U_IS_PSEUDO_INF(&r80), ("%s\n", FormatR80(&r80)));
383 AssertMsg(bType != 2 || RTFLOAT80U_IS_INF(&r80), ("%s\n", FormatR80(&r80)));
384 AssertMsg(bType != 3 || RTFLOAT80U_IS_INDEFINITE(&r80), ("%s\n", FormatR80(&r80)));
385 }
386 else if (bType == 4 || bType == 5 || bType == 6 || bType == 7)
387 {
388 /* Denormals (4,5) and Pseudo denormals (6,7) */
389 if (bType & 1)
390 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
391 else if (r80.sj64.uFraction == 0 && bType < 6)
392 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
393 r80.sj64.uExponent = 0;
394 r80.sj64.fInteger = bType >= 6;
395 AssertMsg(bType >= 6 || RTFLOAT80U_IS_DENORMAL(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
396 AssertMsg(bType < 6 || RTFLOAT80U_IS_PSEUDO_DENORMAL(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
397 }
398 else if (bType == 8 || bType == 9)
399 {
400 /* Pseudo NaN. */
401 if (bType & 1)
402 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
403 else if (r80.sj64.uFraction == 0 && !r80.sj64.fInteger)
404 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
405 r80.sj64.uExponent = 0x7fff;
406 if (r80.sj64.fInteger)
407 r80.sj64.uFraction |= RT_BIT_64(62);
408 else
409 r80.sj64.uFraction &= ~RT_BIT_64(62);
410 r80.sj64.fInteger = 0;
411 AssertMsg(RTFLOAT80U_IS_PSEUDO_NAN(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
412 AssertMsg(RTFLOAT80U_IS_NAN(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
413 }
414 else if (bType == 10 || bType == 11)
415 {
416 /* Quiet and signalling NaNs (using fInteger to pick which). */
417 if (bType & 1)
418 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
419 else if (r80.sj64.uFraction == 0)
420 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
421 r80.sj64.uExponent = 0x7fff;
422 if (r80.sj64.fInteger)
423 r80.sj64.uFraction |= RT_BIT_64(62);
424 else
425 r80.sj64.uFraction &= ~RT_BIT_64(62);
426 r80.sj64.fInteger = 1;
427 AssertMsg(RTFLOAT80U_IS_SIGNALLING_NAN(&r80) || RTFLOAT80U_IS_QUIET_NAN(&r80), ("%s\n", FormatR80(&r80)));
428 AssertMsg(RTFLOAT80U_IS_NAN(&r80), ("%s\n", FormatR80(&r80)));
429 }
430 else if (bType == 12 || bType == 13)
431 {
432 /* Unnormals */
433 if (bType & 1)
434 SafeR80FractionShift(&r80, RandU8() % 62);
435 r80.sj64.fInteger = 0;
436 AssertMsg(RTFLOAT80U_IS_UNNORMAL(&r80), ("%s\n", FormatR80(&r80)));
437 }
438 else if (bType < 24)
439 {
440 /* Make sure we have lots of normalized values. */
441 const unsigned uMinExp = cTarget == 64 ? RTFLOAT80U_EXP_BIAS - RTFLOAT64U_EXP_BIAS
442 : cTarget == 32 ? RTFLOAT80U_EXP_BIAS - RTFLOAT32U_EXP_BIAS : 0;
443 const unsigned uMaxExp = cTarget == 64 ? uMinExp + RTFLOAT64U_EXP_MAX
444 : cTarget == 32 ? uMinExp + RTFLOAT32U_EXP_MAX : RTFLOAT80U_EXP_MAX;
445 r80.sj64.fInteger = 1;
446 if (r80.sj64.uExponent <= uMinExp)
447 r80.sj64.uExponent = uMinExp + 1;
448 else if (r80.sj64.uExponent >= uMaxExp)
449 r80.sj64.uExponent = uMaxExp - 1;
450
451 if (bType == 14)
452 { /* All 1s is useful to testing rounding. Also try trigger special
453 behaviour by sometimes rounding out of range, while we're at it. */
454 r80.sj64.uFraction = RT_BIT_64(63) - 1;
455 uint8_t bExp = RandU8();
456 if ((bExp & 3) == 0)
457 r80.sj64.uExponent = uMaxExp - 1;
458 else if ((bExp & 3) == 1)
459 r80.sj64.uExponent = uMinExp + 1;
460 else if ((bExp & 3) == 2)
461 r80.sj64.uExponent = uMinExp - (bExp & 15); /* (small numbers are mapped to subnormal values) */
462 }
463
464 AssertMsg(RTFLOAT80U_IS_NORMAL(&r80), ("%s\n", FormatR80(&r80)));
465 }
466 return r80;
467}
468
469
470static RTFLOAT80U RandR80Src(uint32_t iTest)
471{
472 RT_NOREF(iTest);
473 return RandR80Ex();
474}
475
476
477static void SafeR64FractionShift(PRTFLOAT64U pr64, uint8_t cShift)
478{
479 if (pr64->s64.uFraction >= RT_BIT_64(cShift))
480 pr64->s64.uFraction >>= cShift;
481 else
482 pr64->s64.uFraction = (cShift % 19) + 1;
483}
484
485
486static RTFLOAT64U RandR64Src(uint32_t iTest)
487{
488 RT_NOREF(iTest);
489
490 RTFLOAT64U r64;
491 r64.u = RandU64();
492
493 /*
494 * Make it more likely that we get a good selection of special values.
495 * On average 6 out of 16 calls should return a special value.
496 */
497 uint8_t bType = RandU8() & 0xf;
498 if (bType == 0 || bType == 1)
499 {
500 /* 0 or Infinity. We only keep fSign here. */
501 r64.s.uExponent = bType == 0 ? 0 : 0x7ff;
502 r64.s.uFractionHigh = 0;
503 r64.s.uFractionLow = 0;
504 AssertMsg(bType != 0 || RTFLOAT64U_IS_ZERO(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
505 AssertMsg(bType != 1 || RTFLOAT64U_IS_INF(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
506 }
507 else if (bType == 2 || bType == 3)
508 {
509 /* Subnormals */
510 if (bType == 3)
511 SafeR64FractionShift(&r64, r64.s64.uExponent % 51);
512 else if (r64.s64.uFraction == 0)
513 r64.s64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT64U_FRACTION_BITS) - 1);
514 r64.s64.uExponent = 0;
515 AssertMsg(RTFLOAT64U_IS_SUBNORMAL(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
516 }
517 else if (bType == 4 || bType == 5)
518 {
519 /* NaNs */
520 if (bType == 5)
521 SafeR64FractionShift(&r64, r64.s64.uExponent % 51);
522 else if (r64.s64.uFraction == 0)
523 r64.s64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT64U_FRACTION_BITS) - 1);
524 r64.s64.uExponent = 0x7ff;
525 AssertMsg(RTFLOAT64U_IS_NAN(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
526 }
527 else if (bType < 12)
528 {
529 /* Make sure we have lots of normalized values. */
530 if (r64.s.uExponent == 0)
531 r64.s.uExponent = 1;
532 else if (r64.s.uExponent == 0x7ff)
533 r64.s.uExponent = 0x7fe;
534 AssertMsg(RTFLOAT64U_IS_NORMAL(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
535 }
536 return r64;
537}
538
539
540static void SafeR32FractionShift(PRTFLOAT32U pr32, uint8_t cShift)
541{
542 if (pr32->s.uFraction >= RT_BIT_32(cShift))
543 pr32->s.uFraction >>= cShift;
544 else
545 pr32->s.uFraction = (cShift % 19) + 1;
546}
547
548
549static RTFLOAT32U RandR32Src(uint32_t iTest)
550{
551 RT_NOREF(iTest);
552
553 RTFLOAT32U r32;
554 r32.u = RandU32();
555
556 /*
557 * Make it more likely that we get a good selection of special values.
558 * On average 6 out of 16 calls should return a special value.
559 */
560 uint8_t bType = RandU8() & 0xf;
561 if (bType == 0 || bType == 1)
562 {
563 /* 0 or Infinity. We only keep fSign here. */
564 r32.s.uExponent = bType == 0 ? 0 : 0xff;
565 r32.s.uFraction = 0;
566 AssertMsg(bType != 0 || RTFLOAT32U_IS_ZERO(&r32), ("%s\n", FormatR32(&r32)));
567 AssertMsg(bType != 1 || RTFLOAT32U_IS_INF(&r32), ("%s\n", FormatR32(&r32)));
568 }
569 else if (bType == 2 || bType == 3)
570 {
571 /* Subnormals */
572 if (bType == 3)
573 SafeR32FractionShift(&r32, r32.s.uExponent % 22);
574 else if (r32.s.uFraction == 0)
575 r32.s.uFraction = RTRandU32Ex(1, RT_BIT_32(RTFLOAT32U_FRACTION_BITS) - 1);
576 r32.s.uExponent = 0;
577 AssertMsg(RTFLOAT32U_IS_SUBNORMAL(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
578 }
579 else if (bType == 4 || bType == 5)
580 {
581 /* NaNs */
582 if (bType == 5)
583 SafeR32FractionShift(&r32, r32.s.uExponent % 22);
584 else if (r32.s.uFraction == 0)
585 r32.s.uFraction = RTRandU32Ex(1, RT_BIT_32(RTFLOAT32U_FRACTION_BITS) - 1);
586 r32.s.uExponent = 0xff;
587 AssertMsg(RTFLOAT32U_IS_NAN(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
588 }
589 else if (bType < 12)
590 {
591 /* Make sure we have lots of normalized values. */
592 if (r32.s.uExponent == 0)
593 r32.s.uExponent = 1;
594 else if (r32.s.uExponent == 0xff)
595 r32.s.uExponent = 0xfe;
596 AssertMsg(RTFLOAT32U_IS_NORMAL(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
597 }
598 return r32;
599}
600
601
602const char *GenFormatR80(PCRTFLOAT80U plrd)
603{
604 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
605 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT80U_INIT_C(%d,%#RX64,%u)",
606 plrd->s.fSign, plrd->s.uMantissa, plrd->s.uExponent);
607 return pszBuf;
608}
609
610const char *GenFormatR64(PCRTFLOAT64U prd)
611{
612 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
613 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT64U_INIT_C(%d,%#RX64,%u)",
614 prd->s.fSign, RT_MAKE_U64(prd->s.uFractionLow, prd->s.uFractionHigh), prd->s.uExponent);
615 return pszBuf;
616}
617
618
619const char *GenFormatR32(PCRTFLOAT32U pr)
620{
621 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
622 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT32U_INIT_C(%d,%#RX32,%u)", pr->s.fSign, pr->s.uFraction, pr->s.uExponent);
623 return pszBuf;
624}
625
626
627static void GenerateHeader(PRTSTREAM pOut, const char *pszFileInfix,
628 const char *pszCpuDesc, const char *pszCpuType, const char *pszCpuSuffU)
629{
630 /* We want to tag the generated source code with the revision that produced it. */
631 static char s_szRev[] = "$Revision: 94339 $";
632 const char *pszRev = RTStrStripL(strchr(s_szRev, ':') + 1);
633 size_t cchRev = 0;
634 while (RT_C_IS_DIGIT(pszRev[cchRev]))
635 cchRev++;
636
637 RTStrmPrintf(pOut,
638 "/* $Id: tstIEMAImpl.cpp 94339 2022-03-23 14:01:48Z vboxsync $ */\n"
639 "/** @file\n"
640 " * IEM Assembly Instruction Helper Testcase Data%s%s - r%.*s on %s.\n"
641 " */\n"
642 "\n"
643 "/*\n"
644 " * Copyright (C) 2022 Oracle Corporation\n"
645 " *\n"
646 " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
647 " * available from http://www.virtualbox.org. This file is free software;\n"
648 " * you can redistribute it and/or modify it under the terms of the GNU\n"
649 " * General Public License (GPL) as published by the Free Software\n"
650 " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
651 " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
652 " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
653 " */\n"
654 "\n"
655 "#ifndef VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s%s_h\n"
656 "#define VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s%s_h\n"
657 "#ifndef RT_WITHOUT_PRAGMA_ONCE\n"
658 "# pragma once\n"
659 "#endif\n"
660 ,
661 pszCpuType ? " " : "", pszCpuType ? pszCpuType : "", cchRev, pszRev, pszCpuDesc,
662 pszFileInfix, pszCpuSuffU,
663 pszFileInfix, pszCpuSuffU);
664}
665
666
667static RTEXITCODE GenerateFooterAndClose(PRTSTREAM pOut, const char *pszFilename, const char *pszFileInfix,
668 const char *pszCpuSuff, RTEXITCODE rcExit)
669{
670 RTStrmPrintf(pOut,
671 "\n"
672 "#endif /* !VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s%s_h */\n", pszFileInfix, pszCpuSuff);
673 int rc = RTStrmClose(pOut);
674 if (RT_SUCCESS(rc))
675 return rcExit;
676 return RTMsgErrorExitFailure("RTStrmClose failed on %s: %Rrc", pszFilename, rc);
677}
678
679#endif
680
681
682/*
683 * Test helpers.
684 */
685static const char *EFlagsDiff(uint32_t fActual, uint32_t fExpected)
686{
687 if (fActual == fExpected)
688 return "";
689
690 uint32_t const fXor = fActual ^ fExpected;
691 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
692 size_t cch = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), " - %#x", fXor);
693
694 static struct
695 {
696 const char *pszName;
697 uint32_t fFlag;
698 } const s_aFlags[] =
699 {
700#define EFL_ENTRY(a_Flags) { #a_Flags, X86_EFL_ ## a_Flags }
701 EFL_ENTRY(CF),
702 EFL_ENTRY(PF),
703 EFL_ENTRY(AF),
704 EFL_ENTRY(ZF),
705 EFL_ENTRY(SF),
706 EFL_ENTRY(TF),
707 EFL_ENTRY(IF),
708 EFL_ENTRY(DF),
709 EFL_ENTRY(OF),
710 EFL_ENTRY(IOPL),
711 EFL_ENTRY(NT),
712 EFL_ENTRY(RF),
713 EFL_ENTRY(VM),
714 EFL_ENTRY(AC),
715 EFL_ENTRY(VIF),
716 EFL_ENTRY(VIP),
717 EFL_ENTRY(ID),
718 };
719 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
720 if (s_aFlags[i].fFlag & fXor)
721 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch,
722 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
723 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
724 return pszBuf;
725}
726
727
728static const char *FswDiff(uint16_t fActual, uint16_t fExpected)
729{
730 if (fActual == fExpected)
731 return "";
732
733 uint16_t const fXor = fActual ^ fExpected;
734 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
735 size_t cch = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), " - %#x", fXor);
736
737 static struct
738 {
739 const char *pszName;
740 uint32_t fFlag;
741 } const s_aFlags[] =
742 {
743#define FSW_ENTRY(a_Flags) { #a_Flags, X86_FSW_ ## a_Flags }
744 FSW_ENTRY(IE),
745 FSW_ENTRY(DE),
746 FSW_ENTRY(ZE),
747 FSW_ENTRY(OE),
748 FSW_ENTRY(UE),
749 FSW_ENTRY(PE),
750 FSW_ENTRY(SF),
751 FSW_ENTRY(ES),
752 FSW_ENTRY(C0),
753 FSW_ENTRY(C1),
754 FSW_ENTRY(C2),
755 FSW_ENTRY(C3),
756 FSW_ENTRY(B),
757 };
758 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
759 if (s_aFlags[i].fFlag & fXor)
760 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch,
761 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
762 if (fXor & X86_FSW_TOP_MASK)
763 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "/TOP%u!%u",
764 X86_FSW_TOP_GET(fActual), X86_FSW_TOP_GET(fExpected));
765 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
766 return pszBuf;
767}
768
769
770static const char *FormatFcw(uint16_t fFcw)
771{
772 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
773
774 const char *pszPC = NULL; /* (msc+gcc are too stupid) */
775 switch (fFcw & X86_FCW_PC_MASK)
776 {
777 case X86_FCW_PC_24: pszPC = "PC24"; break;
778 case X86_FCW_PC_RSVD: pszPC = "PCRSVD!"; break;
779 case X86_FCW_PC_53: pszPC = "PC53"; break;
780 case X86_FCW_PC_64: pszPC = "PC64"; break;
781 }
782
783 const char *pszRC = NULL; /* (msc+gcc are too stupid) */
784 switch (fFcw & X86_FCW_RC_MASK)
785 {
786 case X86_FCW_RC_NEAREST: pszRC = "NEAR"; break;
787 case X86_FCW_RC_DOWN: pszRC = "DOWN"; break;
788 case X86_FCW_RC_UP: pszRC = "UP"; break;
789 case X86_FCW_RC_ZERO: pszRC = "ZERO"; break;
790 }
791 size_t cch = RTStrPrintf(&pszBuf[0], sizeof(g_aszBuf[0]), "%s %s", pszPC, pszRC);
792
793 static struct
794 {
795 const char *pszName;
796 uint32_t fFlag;
797 } const s_aFlags[] =
798 {
799#define FCW_ENTRY(a_Flags) { #a_Flags, X86_FCW_ ## a_Flags }
800 FCW_ENTRY(IM),
801 FCW_ENTRY(DM),
802 FCW_ENTRY(ZM),
803 FCW_ENTRY(OM),
804 FCW_ENTRY(UM),
805 FCW_ENTRY(PM),
806 { "6M", 64 },
807 };
808 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
809 if (fFcw & s_aFlags[i].fFlag)
810 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, " %s", s_aFlags[i].pszName);
811
812 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
813 return pszBuf;
814}
815
816
817static const char *FormatR80(PCRTFLOAT80U pr80)
818{
819 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
820 RTStrFormatR80(pszBuf, sizeof(g_aszBuf[0]), pr80, 0, 0, RTSTR_F_SPECIAL);
821 return pszBuf;
822}
823
824
825static const char *FormatR64(PCRTFLOAT64U pr64)
826{
827 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
828 RTStrFormatR64(pszBuf, sizeof(g_aszBuf[0]), pr64, 0, 0, RTSTR_F_SPECIAL);
829 return pszBuf;
830}
831
832
833static const char *FormatR32(PCRTFLOAT32U pr32)
834{
835 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
836 RTStrFormatR32(pszBuf, sizeof(g_aszBuf[0]), pr32, 0, 0, RTSTR_F_SPECIAL);
837 return pszBuf;
838}
839
840
841/*
842 * Binary operations.
843 */
844#ifdef TSTIEMAIMPL_WITH_GENERATOR
845# define GEN_BINARY_TESTS(a_cBits, a_Fmt) \
846static void BinU ## a_cBits ## Generate(PRTSTREAM pOut, PRTSTREAM pOutCpu, const char *pszCpuSuffU, uint32_t cTests) \
847{ \
848 RTStrmPrintf(pOut, "\n\n#define HAVE_BINU%u_TESTS\n", a_cBits); \
849 RTStrmPrintf(pOutCpu, "\n\n#define HAVE_BINU%u_TESTS%s\n", a_cBits, pszCpuSuffU); \
850 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aBinU ## a_cBits); iFn++) \
851 { \
852 PFNIEMAIMPLBINU ## a_cBits const pfn = g_aBinU ## a_cBits[iFn].pfnNative \
853 ? g_aBinU ## a_cBits[iFn].pfnNative : g_aBinU ## a_cBits[iFn].pfn; \
854 PRTSTREAM pOutFn = pOut; \
855 if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE) \
856 { \
857 if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
858 continue; \
859 pOutFn = pOutCpu; \
860 } \
861 \
862 RTStrmPrintf(pOutFn, "static const BINU%u_TEST_T g_aTests_%s[] =\n{\n", a_cBits, g_aBinU ## a_cBits[iFn].pszName); \
863 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
864 { \
865 BINU ## a_cBits ## _TEST_T Test; \
866 Test.fEflIn = RandEFlags(); \
867 Test.fEflOut = Test.fEflIn; \
868 Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
869 Test.uDstOut = Test.uDstIn; \
870 Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
871 if (g_aBinU ## a_cBits[iFn].uExtra) \
872 Test.uSrcIn &= a_cBits - 1; /* Restrict bit index according to operand width */ \
873 Test.uMisc = 0; \
874 pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
875 RTStrmPrintf(pOutFn, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %#x }, /* #%u */\n", \
876 Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
877 } \
878 RTStrmPrintf(pOutFn, "};\n"); \
879 } \
880}
881#else
882# define GEN_BINARY_TESTS(a_cBits, a_Fmt)
883#endif
884
885#define TEST_BINARY_OPS(a_cBits, a_uType, a_Fmt, a_aSubTests) \
886GEN_BINARY_TESTS(a_cBits, a_Fmt) \
887\
888static void BinU ## a_cBits ## Test(void) \
889{ \
890 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
891 { \
892 RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
893 BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
894 uint32_t const cTests = a_aSubTests[iFn].cTests; \
895 PFNIEMAIMPLBINU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
896 uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
897 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
898 { \
899 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
900 { \
901 uint32_t fEfl = paTests[iTest].fEflIn; \
902 a_uType uDst = paTests[iTest].uDstIn; \
903 pfn(&uDst, paTests[iTest].uSrcIn, &fEfl); \
904 if ( uDst != paTests[iTest].uDstOut \
905 || fEfl != paTests[iTest].fEflOut) \
906 RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s - %s\n", \
907 iTest, !iVar ? "" : "/n", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
908 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
909 EFlagsDiff(fEfl, paTests[iTest].fEflOut), \
910 uDst == paTests[iTest].uDstOut ? "eflags" : fEfl == paTests[iTest].fEflOut ? "dst" : "both"); \
911 else \
912 { \
913 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
914 *g_pfEfl = paTests[iTest].fEflIn; \
915 pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, g_pfEfl); \
916 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
917 RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
918 } \
919 } \
920 pfn = a_aSubTests[iFn].pfnNative; \
921 } \
922 } \
923}
924
925
926/*
927 * 8-bit binary operations.
928 */
929
930#ifndef HAVE_BINU8_TESTS
931static const BINU8_TEST_T g_aTests_add_u8[] = { {0} };
932static const BINU8_TEST_T g_aTests_add_u8_locked[] = { {0} };
933static const BINU8_TEST_T g_aTests_adc_u8[] = { {0} };
934static const BINU8_TEST_T g_aTests_adc_u8_locked[] = { {0} };
935static const BINU8_TEST_T g_aTests_sub_u8[] = { {0} };
936static const BINU8_TEST_T g_aTests_sub_u8_locked[] = { {0} };
937static const BINU8_TEST_T g_aTests_sbb_u8[] = { {0} };
938static const BINU8_TEST_T g_aTests_sbb_u8_locked[] = { {0} };
939static const BINU8_TEST_T g_aTests_or_u8[] = { {0} };
940static const BINU8_TEST_T g_aTests_or_u8_locked[] = { {0} };
941static const BINU8_TEST_T g_aTests_xor_u8[] = { {0} };
942static const BINU8_TEST_T g_aTests_xor_u8_locked[] = { {0} };
943static const BINU8_TEST_T g_aTests_and_u8[] = { {0} };
944static const BINU8_TEST_T g_aTests_and_u8_locked[] = { {0} };
945static const BINU8_TEST_T g_aTests_cmp_u8[] = { {0} };
946static const BINU8_TEST_T g_aTests_test_u8[] = { {0} };
947#endif
948
949static const BINU8_T g_aBinU8[] =
950{
951 ENTRY(add_u8),
952 ENTRY(add_u8_locked),
953 ENTRY(adc_u8),
954 ENTRY(adc_u8_locked),
955 ENTRY(sub_u8),
956 ENTRY(sub_u8_locked),
957 ENTRY(sbb_u8),
958 ENTRY(sbb_u8_locked),
959 ENTRY(or_u8),
960 ENTRY(or_u8_locked),
961 ENTRY(xor_u8),
962 ENTRY(xor_u8_locked),
963 ENTRY(and_u8),
964 ENTRY(and_u8_locked),
965 ENTRY(cmp_u8),
966 ENTRY(test_u8),
967};
968
969TEST_BINARY_OPS(8, uint8_t, "%#04x", g_aBinU8)
970
971
972/*
973 * 16-bit binary operations.
974 */
975
#ifndef HAVE_BINU16_TESTS
/* Empty placeholder tables so the file builds when no generated 16-bit test data is included. */
static const BINU16_TEST_T g_aTests_add_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_add_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_cmp_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_test_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bt_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_arpl[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_AMD
/* Ditto for the AMD EFLAGS-flavour data. */
static const BINU16_TEST_T g_aTests_bsf_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_amd[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_INTEL
/* Ditto for the Intel EFLAGS-flavour data. */
static const BINU16_TEST_T g_aTests_bsf_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_intel[] = { {0} };
#endif
1012
/**
 * Worker table for the 16-bit binary operations.  Includes the bit-test ops
 * (ENTRY_EX with a 1 as the extra value -- semantics defined by the ENTRY_EX
 * macro) and vendor-flavoured bsf/bsr/imul_two entries whose second argument
 * is the mask of EFLAGS left undefined by the instruction.
 */
static const BINU16_T g_aBinU16[] =
{
    ENTRY(add_u16),
    ENTRY(add_u16_locked),
    ENTRY(adc_u16),
    ENTRY(adc_u16_locked),
    ENTRY(sub_u16),
    ENTRY(sub_u16_locked),
    ENTRY(sbb_u16),
    ENTRY(sbb_u16_locked),
    ENTRY(or_u16),
    ENTRY(or_u16_locked),
    ENTRY(xor_u16),
    ENTRY(xor_u16_locked),
    ENTRY(and_u16),
    ENTRY(and_u16_locked),
    ENTRY(cmp_u16),
    ENTRY(test_u16),
    ENTRY_EX(bt_u16, 1),
    ENTRY_EX(btc_u16, 1),
    ENTRY_EX(btc_u16_locked, 1),
    ENTRY_EX(btr_u16, 1),
    ENTRY_EX(btr_u16_locked, 1),
    ENTRY_EX(bts_u16, 1),
    ENTRY_EX(bts_u16_locked, 1),
    ENTRY_AMD( bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY(arpl),
};

/* Instantiates the test-data generator and the test driver for the 16-bit binary ops. */
TEST_BINARY_OPS(16, uint16_t, "%#06x", g_aBinU16)
1048
1049
1050/*
1051 * 32-bit binary operations.
1052 */
1053
#ifndef HAVE_BINU32_TESTS
/* Empty placeholder tables so the file builds when no generated 32-bit test data is included. */
static const BINU32_TEST_T g_aTests_add_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_add_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_cmp_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_test_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bt_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32_locked[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_AMD
/* Ditto for the AMD EFLAGS-flavour data. */
static const BINU32_TEST_T g_aTests_bsf_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_amd[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_INTEL
/* Ditto for the Intel EFLAGS-flavour data. */
static const BINU32_TEST_T g_aTests_bsf_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_intel[] = { {0} };
#endif
1089
/**
 * Worker table for the 32-bit binary operations.  Same layout as g_aBinU16,
 * minus arpl (which only exists in 16-bit form).
 */
static const BINU32_T g_aBinU32[] =
{
    ENTRY(add_u32),
    ENTRY(add_u32_locked),
    ENTRY(adc_u32),
    ENTRY(adc_u32_locked),
    ENTRY(sub_u32),
    ENTRY(sub_u32_locked),
    ENTRY(sbb_u32),
    ENTRY(sbb_u32_locked),
    ENTRY(or_u32),
    ENTRY(or_u32_locked),
    ENTRY(xor_u32),
    ENTRY(xor_u32_locked),
    ENTRY(and_u32),
    ENTRY(and_u32_locked),
    ENTRY(cmp_u32),
    ENTRY(test_u32),
    ENTRY_EX(bt_u32, 1),
    ENTRY_EX(btc_u32, 1),
    ENTRY_EX(btc_u32_locked, 1),
    ENTRY_EX(btr_u32, 1),
    ENTRY_EX(btr_u32_locked, 1),
    ENTRY_EX(bts_u32, 1),
    ENTRY_EX(bts_u32_locked, 1),
    ENTRY_AMD( bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates the test-data generator and the test driver for the 32-bit binary ops. */
TEST_BINARY_OPS(32, uint32_t, "%#010RX32", g_aBinU32)
1124
1125
1126/*
1127 * 64-bit binary operations.
1128 */
1129
#ifndef HAVE_BINU64_TESTS
/* Empty placeholder tables so the file builds when no generated 64-bit test data is included. */
static const BINU64_TEST_T g_aTests_add_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_add_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_cmp_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_test_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bt_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64_locked[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_AMD
/* Ditto for the AMD EFLAGS-flavour data. */
static const BINU64_TEST_T g_aTests_bsf_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_amd[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_INTEL
/* Ditto for the Intel EFLAGS-flavour data. */
static const BINU64_TEST_T g_aTests_bsf_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_intel[] = { {0} };
#endif
1165
/**
 * Worker table for the 64-bit binary operations.  Same layout as g_aBinU32.
 */
static const BINU64_T g_aBinU64[] =
{
    ENTRY(add_u64),
    ENTRY(add_u64_locked),
    ENTRY(adc_u64),
    ENTRY(adc_u64_locked),
    ENTRY(sub_u64),
    ENTRY(sub_u64_locked),
    ENTRY(sbb_u64),
    ENTRY(sbb_u64_locked),
    ENTRY(or_u64),
    ENTRY(or_u64_locked),
    ENTRY(xor_u64),
    ENTRY(xor_u64_locked),
    ENTRY(and_u64),
    ENTRY(and_u64_locked),
    ENTRY(cmp_u64),
    ENTRY(test_u64),
    ENTRY_EX(bt_u64, 1),
    ENTRY_EX(btc_u64, 1),
    ENTRY_EX(btc_u64_locked, 1),
    ENTRY_EX(btr_u64, 1),
    ENTRY_EX(btr_u64_locked, 1),
    ENTRY_EX(bts_u64, 1),
    ENTRY_EX(bts_u64_locked, 1),
    ENTRY_AMD( bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD( imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates the test-data generator and the test driver for the 64-bit binary ops. */
TEST_BINARY_OPS(64, uint64_t, "%#018RX64", g_aBinU64)
1200
1201
1202/*
1203 * XCHG
1204 */
1205static void XchgTest(void)
1206{
1207 RTTestSub(g_hTest, "xchg");
1208 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
1209 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
1210 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
1211 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
1212
1213 static struct
1214 {
1215 uint8_t cb; uint64_t fMask;
1216 union
1217 {
1218 uintptr_t pfn;
1219 FNIEMAIMPLXCHGU8 *pfnU8;
1220 FNIEMAIMPLXCHGU16 *pfnU16;
1221 FNIEMAIMPLXCHGU32 *pfnU32;
1222 FNIEMAIMPLXCHGU64 *pfnU64;
1223 } u;
1224 }
1225 s_aXchgWorkers[] =
1226 {
1227 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_locked } },
1228 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_locked } },
1229 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_locked } },
1230 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_locked } },
1231 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_unlocked } },
1232 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_unlocked } },
1233 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_unlocked } },
1234 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_unlocked } },
1235 };
1236 for (size_t i = 0; i < RT_ELEMENTS(s_aXchgWorkers); i++)
1237 {
1238 RTUINT64U uIn1, uIn2, uMem, uDst;
1239 uMem.u = uIn1.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
1240 uDst.u = uIn2.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
1241 if (uIn1.u == uIn2.u)
1242 uDst.u = uIn2.u = ~uIn2.u;
1243
1244 switch (s_aXchgWorkers[i].cb)
1245 {
1246 case 1:
1247 s_aXchgWorkers[i].u.pfnU8(g_pu8, g_pu8Two);
1248 s_aXchgWorkers[i].u.pfnU8(&uMem.au8[0], &uDst.au8[0]);
1249 break;
1250 case 2:
1251 s_aXchgWorkers[i].u.pfnU16(g_pu16, g_pu16Two);
1252 s_aXchgWorkers[i].u.pfnU16(&uMem.Words.w0, &uDst.Words.w0);
1253 break;
1254 case 4:
1255 s_aXchgWorkers[i].u.pfnU32(g_pu32, g_pu32Two);
1256 s_aXchgWorkers[i].u.pfnU32(&uMem.DWords.dw0, &uDst.DWords.dw0);
1257 break;
1258 case 8:
1259 s_aXchgWorkers[i].u.pfnU64(g_pu64, g_pu64Two);
1260 s_aXchgWorkers[i].u.pfnU64(&uMem.u, &uDst.u);
1261 break;
1262 default: RTTestFailed(g_hTest, "%d\n", s_aXchgWorkers[i].cb); break;
1263 }
1264
1265 if (uMem.u != uIn2.u || uDst.u != uIn1.u)
1266 RTTestFailed(g_hTest, "i=%u: %#RX64, %#RX64 -> %#RX64, %#RX64\n", i, uIn1.u, uIn2.u, uMem.u, uDst.u);
1267 }
1268}
1269
1270
1271/*
1272 * XADD
1273 */
1274static void XaddTest(void)
1275{
1276#define TEST_XADD(a_cBits, a_Type, a_Fmt) do { \
1277 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXADDU ## a_cBits, (a_Type *, a_Type *, uint32_t *)); \
1278 static struct \
1279 { \
1280 const char *pszName; \
1281 FNIEMAIMPLXADDU ## a_cBits *pfn; \
1282 BINU ## a_cBits ## _TEST_T const *paTests; \
1283 uint32_t cTests; \
1284 } const s_aFuncs[] = \
1285 { \
1286 { "xadd_u" # a_cBits, iemAImpl_xadd_u ## a_cBits, \
1287 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
1288 { "xadd_u" # a_cBits "8_locked", iemAImpl_xadd_u ## a_cBits ## _locked, \
1289 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
1290 }; \
1291 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
1292 { \
1293 RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
1294 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
1295 uint32_t const cTests = s_aFuncs[iFn].cTests; \
1296 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
1297 { \
1298 uint32_t fEfl = paTests[iTest].fEflIn; \
1299 a_Type uSrc = paTests[iTest].uSrcIn; \
1300 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
1301 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uSrc, &fEfl); \
1302 if ( fEfl != paTests[iTest].fEflOut \
1303 || *g_pu ## a_cBits != paTests[iTest].uDstOut \
1304 || uSrc != paTests[iTest].uDstIn) \
1305 RTTestFailed(g_hTest, "%s/#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt " src=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
1306 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
1307 fEfl, *g_pu ## a_cBits, uSrc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].uDstIn, \
1308 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
1309 } \
1310 } \
1311 } while(0)
1312 TEST_XADD(8, uint8_t, "%#04x");
1313 TEST_XADD(16, uint16_t, "%#06x");
1314 TEST_XADD(32, uint32_t, "%#010RX32");
1315 TEST_XADD(64, uint64_t, "%#010RX64");
1316}
1317
1318
1319/*
1320 * CMPXCHG
1321 */
1322
1323static void CmpXchgTest(void)
1324{
1325#define TEST_CMPXCHG(a_cBits, a_Type, a_Fmt) do {\
1326 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHGU ## a_cBits, (a_Type *, a_Type *, a_Type, uint32_t *)); \
1327 static struct \
1328 { \
1329 const char *pszName; \
1330 FNIEMAIMPLCMPXCHGU ## a_cBits *pfn; \
1331 PFNIEMAIMPLBINU ## a_cBits pfnSub; \
1332 BINU ## a_cBits ## _TEST_T const *paTests; \
1333 uint32_t cTests; \
1334 } const s_aFuncs[] = \
1335 { \
1336 { "cmpxchg_u" # a_cBits, iemAImpl_cmpxchg_u ## a_cBits, iemAImpl_sub_u ## a_cBits, \
1337 g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
1338 { "cmpxchg_u" # a_cBits "_locked", iemAImpl_cmpxchg_u ## a_cBits ## _locked, iemAImpl_sub_u ## a_cBits, \
1339 g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
1340 }; \
1341 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
1342 { \
1343 RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
1344 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
1345 uint32_t const cTests = s_aFuncs[iFn].cTests; \
1346 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
1347 { \
1348 /* as is (99% likely to be negative). */ \
1349 uint32_t fEfl = paTests[iTest].fEflIn; \
1350 a_Type const uNew = paTests[iTest].uSrcIn + 0x42; \
1351 a_Type uA = paTests[iTest].uDstIn; \
1352 *g_pu ## a_cBits = paTests[iTest].uSrcIn; \
1353 a_Type const uExpect = uA != paTests[iTest].uSrcIn ? paTests[iTest].uSrcIn : uNew; \
1354 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
1355 if ( fEfl != paTests[iTest].fEflOut \
1356 || *g_pu ## a_cBits != uExpect \
1357 || uA != paTests[iTest].uSrcIn) \
1358 RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
1359 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uSrcIn, paTests[iTest].uDstIn, \
1360 uNew, fEfl, *g_pu ## a_cBits, uA, paTests[iTest].fEflOut, uExpect, paTests[iTest].uSrcIn, \
1361 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
1362 /* positive */ \
1363 uint32_t fEflExpect = paTests[iTest].fEflIn; \
1364 uA = paTests[iTest].uDstIn; \
1365 s_aFuncs[iFn].pfnSub(&uA, uA, &fEflExpect); \
1366 fEfl = paTests[iTest].fEflIn; \
1367 uA = paTests[iTest].uDstIn; \
1368 *g_pu ## a_cBits = uA; \
1369 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
1370 if ( fEfl != fEflExpect \
1371 || *g_pu ## a_cBits != uNew \
1372 || uA != paTests[iTest].uDstIn) \
1373 RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
1374 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uDstIn, \
1375 uNew, fEfl, *g_pu ## a_cBits, uA, fEflExpect, uNew, paTests[iTest].uDstIn, \
1376 EFlagsDiff(fEfl, fEflExpect)); \
1377 } \
1378 } \
1379 } while(0)
1380 TEST_CMPXCHG(8, uint8_t, "%#04RX8");
1381 TEST_CMPXCHG(16, uint16_t, "%#06x");
1382 TEST_CMPXCHG(32, uint32_t, "%#010RX32");
1383#if ARCH_BITS != 32 /* calling convension issue, skipping as it's an unsupported host */
1384 TEST_CMPXCHG(64, uint64_t, "%#010RX64");
1385#endif
1386}
1387
/**
 * Tests the cmpxchg8b workers (plain and locked) with random values.
 *
 * For each worker two scenarios are run per iteration: a positive one where
 * the comparand matches memory (ZF must be set, new value stored, old value
 * returned), and a negative one where it cannot match (ZF must be clear,
 * memory unchanged, memory value loaded into the comparand).
 */
static void CmpXchg8bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
    static struct
    {
        const char *pszName;
        FNIEMAIMPLCMPXCHG8B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg8b", iemAImpl_cmpxchg8b },
        { "cmpxchg8b_locked", iemAImpl_cmpxchg8b_locked },
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2) /* iTest labels the positive run, iTest+1 the negative one */
        {
            uint64_t const uOldValue = RandU64();
            uint64_t const uNewValue = RandU64();

            /* positive test. */
            RTUINT64U uA, uB;
            uB.u = uNewValue;
            uA.u = uOldValue;
            *g_pu64 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl = fEflIn;
            s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn | X86_EFL_ZF) /* only ZF may change; it must be set on a match */
                || *g_pu64 != uNewValue
                || uA.u != uOldValue)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
                             iTest, fEflIn, uOldValue, uOldValue, uNewValue,
                             fEfl, *g_pu64, uA.u,
                             (fEflIn | X86_EFL_ZF), uNewValue, uOldValue, EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.u == uNewValue); /* the new-value operand must not be clobbered */

            /* negative */
            uint64_t const uExpect = ~uOldValue; /* guaranteed != uOldValue, so the compare fails */
            *g_pu64 = uExpect;
            uA.u = uOldValue;
            uB.u = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn & ~X86_EFL_ZF) /* ZF must be cleared on mismatch */
                || *g_pu64 != uExpect
                || uA.u != uExpect) /* mismatch loads the memory value into the comparand */
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
                             iTest + 1, fEflIn, uExpect, uOldValue, uNewValue,
                             fEfl, *g_pu64, uA.u,
                             (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.u == uNewValue);
        }
    }
}
1443
/**
 * Tests the cmpxchg16b workers with random 128-bit values.
 *
 * Mirrors CmpXchg8bTest: a positive (match) and a negative (mismatch) run
 * per iteration.  Also covers the C fallback worker on non-ARM64 hosts, and
 * skips entirely when the host CPU lacks the CMPXCHG16B feature.
 */
static void CmpXchg16bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG16B,(PRTUINT128U, PRTUINT128U, PRTUINT128U, uint32_t *));
    static struct
    {
        const char *pszName;
        FNIEMAIMPLCMPXCHG16B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg16b", iemAImpl_cmpxchg16b },
        { "cmpxchg16b_locked", iemAImpl_cmpxchg16b_locked },
#if !defined(RT_ARCH_ARM64)
        { "cmpxchg16b_fallback", iemAImpl_cmpxchg16b_fallback },
#endif
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
#if !defined(IEM_WITHOUT_ASSEMBLY) && defined(RT_ARCH_AMD64)
        /* The assembly workers use the real instruction; skip if unsupported by the host. */
        if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16))
            continue;
#endif
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2) /* iTest = positive run, iTest+1 = negative run */
        {
            RTUINT128U const uOldValue = RandU128();
            RTUINT128U const uNewValue = RandU128();

            /* positive test. */
            RTUINT128U uA, uB;
            uB = uNewValue;
            uA = uOldValue;
            *g_pu128 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl = fEflIn;
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn | X86_EFL_ZF) /* ZF set on match, all other flags unchanged */
                || g_pu128->s.Lo != uNewValue.s.Lo
                || g_pu128->s.Hi != uNewValue.s.Hi
                || uA.s.Lo != uOldValue.s.Lo
                || uA.s.Hi != uOldValue.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest, fEflIn, uOldValue.s.Hi, uOldValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn | X86_EFL_ZF), uNewValue.s.Hi, uNewValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo,
                             EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi); /* new value untouched */

            /* negative */
            RTUINT128U const uExpect = RTUINT128_INIT(~uOldValue.s.Hi, ~uOldValue.s.Lo); /* guaranteed mismatch */
            *g_pu128 = uExpect;
            uA = uOldValue;
            uB = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn & ~X86_EFL_ZF) /* ZF cleared on mismatch */
                || g_pu128->s.Lo != uExpect.s.Lo
                || g_pu128->s.Hi != uExpect.s.Hi
                || uA.s.Lo != uExpect.s.Lo /* mismatch loads memory value into the comparand */
                || uA.s.Hi != uExpect.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest + 1, fEflIn, uExpect.s.Hi, uExpect.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn & ~X86_EFL_ZF), uExpect.s.Hi, uExpect.s.Lo, uExpect.s.Hi, uExpect.s.Lo,
                             EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
        }
    }
}
1516
1517
1518/*
1519 * Double shifts.
1520 *
1521 * Note! We use BINUxx_TEST_T with the shift value in the uMisc field.
1522 */
1523
#ifndef HAVE_SHIFT_DBL_TESTS_AMD
/* Empty placeholder tables when no generated AMD-flavour double-shift data is included. */
static const BINU16_TEST_T g_aTests_shrd_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_amd[] = { {0} };
#endif
#ifndef HAVE_SHIFT_DBL_TESTS_INTEL
/* Ditto for the Intel-flavour data. */
static const BINU16_TEST_T g_aTests_shrd_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_intel[] = { {0} };
#endif
1540
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Emits a ShiftDblUxxGenerate() function that records shld/shrd test vectors
 * by running the native worker on random inputs and printing the results as
 * a BINUxx_TEST_T table.  Only sub-tests matching the host's EFLAGS flavour
 * (or the native-behaviour marker) are generated.  The shift count (uMisc)
 * is masked to a_cBits * 4 - 1 so counts beyond the operand width are
 * deliberately covered as well.
 */
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
void ShiftDblU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            Test.uMisc = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
/* No generator build: expand to nothing. */
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests)
#endif
1570
/**
 * Instantiates the sub-test table, the optional generator and the
 * ShiftDblUxxTest() driver for one operand width of shld/shrd.
 *
 * The driver replays each recorded test; on success it re-runs the worker on
 * the global test variables as an extra check.  When the host EFLAGS flavour
 * matches a sub-test and a native worker exists, a second pass ("/n" in the
 * failure message) runs the native implementation too.
 */
#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt, a_aSubTests) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfnNative; \
    BINU ## a_cBits ## _TEST_T const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_AMD(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
}; \
\
GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
\
static void ShiftDblU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
                if ( uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%03u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " shift=%-2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", paTests[iTest].fEflIn, \
                                 paTests[iTest].uDstIn, paTests[iTest].uSrcIn, (unsigned)paTests[iTest].uMisc, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut), uDst == paTests[iTest].uDstOut ? "" : " dst!"); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, paTests[iTest].uMisc, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
/* Instantiate for each operand width. */
TEST_SHIFT_DBL(16, uint16_t, "%#06RX16", g_aShiftDblU16)
TEST_SHIFT_DBL(32, uint32_t, "%#010RX32", g_aShiftDblU32)
TEST_SHIFT_DBL(64, uint64_t, "%#018RX64", g_aShiftDblU64)
1629
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Generates double-shift test data for all operand widths, preceded by the
 * HAVE_SHIFT_DBL_TESTS<suffix> marker define that enables the data on rebuild.
 */
static void ShiftDblGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_DBL_TESTS%s\n", pszCpuSuffU);
    ShiftDblU16Generate(pOut, cTests);
    ShiftDblU32Generate(pOut, cTests);
    ShiftDblU64Generate(pOut, cTests);
}
#endif
1639
1640static void ShiftDblTest(void)
1641{
1642 ShiftDblU16Test();
1643 ShiftDblU32Test();
1644 ShiftDblU64Test();
1645}
1646
1647
1648/*
1649 * Unary operators.
1650 *
 * Note! We use BINUxx_TEST_T, ignoring uSrcIn and uMisc.
1652 */
1653
#ifndef HAVE_UNARY_TESTS
/* Empty placeholder tables (one per unary op and width) so the file builds
   when no generated unary test data is included. */
# define DUMMY_UNARY_TESTS(a_cBits, a_Type) \
    static const a_Type g_aTests_inc_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_inc_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits ## _locked[] = { {0} }
DUMMY_UNARY_TESTS(8, BINU8_TEST_T);
DUMMY_UNARY_TESTS(16, BINU16_TEST_T);
DUMMY_UNARY_TESTS(32, BINU32_TEST_T);
DUMMY_UNARY_TESTS(64, BINU64_TEST_T);
#endif
1669
/**
 * Instantiates the sub-test table, the data generator and the
 * UnaryUxxTest() driver for one width of the unary ops
 * (inc/dec/not/neg and their locked variants).
 *
 * NOTE(review): unlike GEN_SHIFT_DBL, the UnaryUxxGenerate() function is
 * compiled unconditionally rather than under TSTIEMAIMPL_WITH_GENERATOR --
 * presumably harmless, but worth confirming whether it should be guarded.
 */
#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLUNARYU ## a_cBits pfn; \
    PFNIEMAIMPLUNARYU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aUnaryU ## a_cBits [] = \
{ \
    ENTRY(inc_u ## a_cBits), \
    ENTRY(inc_u ## a_cBits ## _locked), \
    ENTRY(dec_u ## a_cBits), \
    ENTRY(dec_u ## a_cBits ## _locked), \
    ENTRY(not_u ## a_cBits), \
    ENTRY(not_u ## a_cBits ## _locked), \
    ENTRY(neg_u ## a_cBits), \
    ENTRY(neg_u ## a_cBits ## _locked), \
}; \
\
void UnaryU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aUnaryU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = 0; \
            g_aUnaryU ## a_cBits[iFn].pfn(&Test.uDstOut, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, 0 }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void UnaryU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aUnaryU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aUnaryU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
            if ( uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
/* Instantiate for each operand width. */
TEST_UNARY(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_UNARY(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T)
1746
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Generates unary-op test data for all operand widths, preceded by the
 * HAVE_UNARY_TESTS marker define that enables the data on rebuild.
 */
static void UnaryGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_UNARY_TESTS\n");
    UnaryU8Generate(pOut, cTests);
    UnaryU16Generate(pOut, cTests);
    UnaryU32Generate(pOut, cTests);
    UnaryU64Generate(pOut, cTests);
}
#endif
1757
1758static void UnaryTest(void)
1759{
1760 UnaryU8Test();
1761 UnaryU16Test();
1762 UnaryU32Test();
1763 UnaryU64Test();
1764}
1765
1766
1767/*
1768 * Shifts.
1769 *
1770 * Note! We use BINUxx_TEST_T with the shift count in uMisc and uSrcIn unused.
1771 */
/* Empty placeholder tables (one per shift/rotate op, width and vendor flavour)
   so the file builds when no generated shift test data is included. */
#define DUMMY_SHIFT_TESTS(a_cBits, a_Type, a_Vendor) \
    static const a_Type g_aTests_rol_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_ror_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_rcl_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_rcr_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_shl_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_shr_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_sar_u ## a_cBits ## a_Vendor[] = { {0} }
#ifndef HAVE_SHIFT_TESTS_AMD
DUMMY_SHIFT_TESTS(8, BINU8_TEST_T, _amd);
DUMMY_SHIFT_TESTS(16, BINU16_TEST_T, _amd);
DUMMY_SHIFT_TESTS(32, BINU32_TEST_T, _amd);
DUMMY_SHIFT_TESTS(64, BINU64_TEST_T, _amd);
#endif
#ifndef HAVE_SHIFT_TESTS_INTEL
DUMMY_SHIFT_TESTS(8, BINU8_TEST_T, _intel);
DUMMY_SHIFT_TESTS(16, BINU16_TEST_T, _intel);
DUMMY_SHIFT_TESTS(32, BINU32_TEST_T, _intel);
DUMMY_SHIFT_TESTS(64, BINU64_TEST_T, _intel);
#endif
1792
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Defines ShiftU&lt;a_cBits&gt;Generate(), emitting a BINU&lt;a_cBits&gt;_TEST_T table for
 * each shift/rotate sub-test in @a a_aSubTests whose EFLAGS flavour matches
 * the host.  Two entries are written per iteration - the second with the live
 * EFLAGS bits inverted - so both flag polarities get covered.  The shift
 * count lives in uMisc; uSrcIn is unused (see the section comment above).
 */
# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void ShiftU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
 \
            Test.fEflIn = (~Test.fEflIn & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK; /* flip flag polarity */ \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstOut = Test.uDstIn; \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u b */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif
1829
/**
 * Defines the sub-test table @a a_aSubTests (AMD and Intel EFLAGS flavour
 * entries for rol/ror/rcl/rcr/shl/shr/sar), instantiates the matching
 * generator via GEN_SHIFT, and defines ShiftU&lt;a_cBits&gt;Test() which replays
 * the recorded test values against the worker(s).  The second EFlags
 * argument to ENTRY_AMD/ENTRY_INTEL lists the flags differing between the
 * two vendor behaviors.
 */
#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD( rol_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rol_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD( ror_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(ror_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD( rcl_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rcl_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD( rcr_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rcr_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD( shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_AMD( shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_AMD( sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
}; \
\
GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
\
static void ShiftU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLSHIFTU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut ) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", \
                                 paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
                else \
                { \
                    /* Re-run via the globals to catch workers touching memory around the operands. */ \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; /* second pass exercises the native worker */ \
        } \
    } \
}
TEST_SHIFT(8, uint8_t, "%#04RX8", BINU8_TEST_T, g_aShiftU8)
TEST_SHIFT(16, uint16_t, "%#06RX16", BINU16_TEST_T, g_aShiftU16)
TEST_SHIFT(32, uint32_t, "%#010RX32", BINU32_TEST_T, g_aShiftU32)
TEST_SHIFT(64, uint64_t, "%#018RX64", BINU64_TEST_T, g_aShiftU64)
1899
1900#ifdef TSTIEMAIMPL_WITH_GENERATOR
1901static void ShiftGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
1902{
1903 RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_TESTS%s\n", pszCpuSuffU);
1904 ShiftU8Generate(pOut, cTests);
1905 ShiftU16Generate(pOut, cTests);
1906 ShiftU32Generate(pOut, cTests);
1907 ShiftU64Generate(pOut, cTests);
1908}
1909#endif
1910
1911static void ShiftTest(void)
1912{
1913 ShiftU8Test();
1914 ShiftU16Test();
1915 ShiftU32Test();
1916 ShiftU64Test();
1917}
1918
1919
1920/*
1921 * Multiplication and division.
1922 *
1923 * Note! The 8-bit functions has a different format, so we need to duplicate things.
1924 * Note! Currently ignoring undefined bits.
1925 */
1926
/**
 * Declares an all-zero placeholder test array for each multiply/divide worker
 * of one operand width and CPU vendor suffix; keeps the sub-test tables
 * linking when the generated data (HAVE_MULDIV_TESTS_AMD/_INTEL) is absent.
 */
# define DUMMY_MULDIV_TESTS(a_cBits, a_Type, a_Vendor) \
    static const a_Type g_aTests_mul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_imul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_div_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_idiv_u ## a_cBits ## a_Vendor[] = { {0} }

#ifndef HAVE_MULDIV_TESTS_AMD
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _amd);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _amd);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _amd);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _amd);
#endif

#ifndef HAVE_MULDIV_TESTS_INTEL
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _intel);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _intel);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _intel);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _intel);
#endif
1946
/* U8 */
/** Sub-test descriptors for the 8-bit multiply/divide workers.  uExtra is
 *  consumed by MulDivU8Test() as the set of EFLAGS bits to ignore when
 *  comparing results (undefined flags differ between implementations). */
static const struct
{
    const char *pszName;                /**< Sub-test name. */
    PFNIEMAIMPLMULDIVU8 pfn;            /**< Worker under test. */
    PFNIEMAIMPLMULDIVU8 pfnNative;      /**< Native worker variant, if any. */
    MULDIVU8_TEST_T const *paTests;     /**< Recorded test values. */
    uint32_t cTests, uExtra;            /**< Test count; EFLAGS ignore mask. */
    uint8_t idxCpuEflFlavour;           /**< EFLAGS behavior flavour of this entry. */
} g_aMulDivU8[] =
{
    ENTRY_AMD_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_INTEL_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_AMD_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_INTEL_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
};
1969
1970#ifdef TSTIEMAIMPL_WITH_GENERATOR
1971static void MulDivU8Generate(PRTSTREAM pOut, uint32_t cTests)
1972{
1973 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
1974 {
1975 if ( g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
1976 && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
1977 continue;
1978 RTStrmPrintf(pOut, "static const MULDIVU8_TEST_T g_aTests_%s[] =\n{\n", g_aMulDivU8[iFn].pszName);
1979 for (uint32_t iTest = 0; iTest < cTests; iTest++ )
1980 {
1981 MULDIVU8_TEST_T Test;
1982 Test.fEflIn = RandEFlags();
1983 Test.fEflOut = Test.fEflIn;
1984 Test.uDstIn = RandU16Dst(iTest);
1985 Test.uDstOut = Test.uDstIn;
1986 Test.uSrcIn = RandU8Src(iTest);
1987 Test.rc = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
1988 RTStrmPrintf(pOut, " { %#08x, %#08x, %#06RX16, %#06RX16, %#04RX8, %d }, /* #%u */\n",
1989 Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.rc, iTest);
1990 }
1991 RTStrmPrintf(pOut, "};\n");
1992 }
1993}
1994#endif
1995
1996static void MulDivU8Test(void)
1997{
1998 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
1999 {
2000 RTTestSub(g_hTest, g_aMulDivU8[iFn].pszName);
2001 MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
2002 uint32_t const cTests = g_aMulDivU8[iFn].cTests;
2003 uint32_t const fEflIgn = g_aMulDivU8[iFn].uExtra;
2004 PFNIEMAIMPLMULDIVU8 pfn = g_aMulDivU8[iFn].pfn;
2005 uint32_t const cVars = 1 + (g_aMulDivU8[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && g_aMulDivU8[iFn].pfnNative);
2006 for (uint32_t iVar = 0; iVar < cVars; iVar++)
2007 {
2008 for (uint32_t iTest = 0; iTest < cTests; iTest++ )
2009 {
2010 uint32_t fEfl = paTests[iTest].fEflIn;
2011 uint16_t uDst = paTests[iTest].uDstIn;
2012 int rc = g_aMulDivU8[iFn].pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
2013 if ( uDst != paTests[iTest].uDstOut
2014 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
2015 || rc != paTests[iTest].rc)
2016 RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst=%#06RX16 src=%#04RX8\n"
2017 " %s-> efl=%#08x dst=%#06RX16 rc=%d\n"
2018 "%sexpected %#08x %#06RX16 %d%s\n",
2019 iTest, iVar ? "/n" : "", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn,
2020 iVar ? " " : "", fEfl, uDst, rc,
2021 iVar ? " " : "", paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].rc,
2022 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn));
2023 else
2024 {
2025 *g_pu16 = paTests[iTest].uDstIn;
2026 *g_pfEfl = paTests[iTest].fEflIn;
2027 rc = g_aMulDivU8[iFn].pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
2028 RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
2029 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
2030 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
2031 }
2032 }
2033 pfn = g_aMulDivU8[iFn].pfnNative;
2034 }
2035 }
2036}
2037
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Defines MulDivU&lt;a_cBits&gt;Generate(), emitting a MULDIVU&lt;a_cBits&gt;_TEST_T
 * table for each mul/div sub-test in @a a_aSubTests whose EFLAGS flavour
 * matches the host.  The 16/32/64-bit workers take two destination operands
 * plus a source and return a status code (division overflow etc.).
 */
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void MulDivU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const MULDIVU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDst1In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst1Out = Test.uDst1In; \
            Test.uDst2In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst2Out = Test.uDst2In; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            Test.rc = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", %d }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDst1In, Test.uDst1Out, Test.uDst2In, Test.uDst2Out, Test.uSrcIn, \
                         Test.rc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif
2069
/**
 * Defines the sub-test table @a a_aSubTests for mul/imul/div/idiv (AMD and
 * Intel EFLAGS flavours), instantiates the generator via GEN_MULDIV, and
 * defines MulDivU&lt;a_cBits&gt;Test() replaying the recorded values.  The mask
 * passed to ENTRY_*_EX ends up in uExtra as the EFLAGS bits to ignore when
 * comparing (undefined flags).
 */
#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLMULDIVU ## a_cBits pfn; \
    PFNIEMAIMPLMULDIVU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} a_aSubTests [] = \
{ \
    ENTRY_AMD_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_INTEL_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_AMD_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_INTEL_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_AMD_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_INTEL_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_AMD_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_INTEL_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
}; \
\
GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
\
static void MulDivU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        uint32_t const fEflIgn = a_aSubTests[iFn].uExtra; \
        PFNIEMAIMPLMULDIVU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type uDst1 = paTests[iTest].uDst1In; \
                a_Type uDst2 = paTests[iTest].uDst2In; \
                int rc = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
                if (   uDst1 != paTests[iTest].uDst1Out \
                    || uDst2 != paTests[iTest].uDst2Out \
                    || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)\
                    || rc != paTests[iTest].rc) \
                    RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " src=" a_Fmt "\n" \
                                           " -> efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " rc=%d\n" \
                                           "expected %#08x " a_Fmt " " a_Fmt " %d%s -%s%s%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", \
                                 paTests[iTest].fEflIn, paTests[iTest].uDst1In, paTests[iTest].uDst2In, paTests[iTest].uSrcIn, \
                                 fEfl, uDst1, uDst2, rc, \
                                 paTests[iTest].fEflOut, paTests[iTest].uDst1Out, paTests[iTest].uDst2Out, paTests[iTest].rc, \
                                 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn), \
                                 uDst1 != paTests[iTest].uDst1Out ? " dst1" : "", uDst2 != paTests[iTest].uDst2Out ? " dst2" : "", \
                                 (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) ? " eflags" : ""); \
                else \
                { \
                    /* Re-run via the globals to catch out-of-bounds accesses. */ \
                    *g_pu ## a_cBits = paTests[iTest].uDst1In; \
                    *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    rc = pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDst1Out); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
                    RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
                    RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; /* second pass exercises the native worker */ \
        } \
    } \
}
TEST_MULDIV(16, uint16_t, "%#06RX16", MULDIVU16_TEST_T, g_aMulDivU16)
TEST_MULDIV(32, uint32_t, "%#010RX32", MULDIVU32_TEST_T, g_aMulDivU32)
TEST_MULDIV(64, uint64_t, "%#018RX64", MULDIVU64_TEST_T, g_aMulDivU64)
2144
2145#ifdef TSTIEMAIMPL_WITH_GENERATOR
2146static void MulDivGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
2147{
2148 RTStrmPrintf(pOut, "\n\n#define HAVE_MULDIV_TESTS%s\n", pszCpuSuffU);
2149 MulDivU8Generate(pOut, cTests);
2150 MulDivU16Generate(pOut, cTests);
2151 MulDivU32Generate(pOut, cTests);
2152 MulDivU64Generate(pOut, cTests);
2153}
2154#endif
2155
2156static void MulDivTest(void)
2157{
2158 MulDivU8Test();
2159 MulDivU16Test();
2160 MulDivU32Test();
2161 MulDivU64Test();
2162}
2163
2164
2165/*
2166 * BSWAP
2167 */
/** Exercises the bswap workers with fixed input values. */
static void BswapTest(void)
{
    RTTestSub(g_hTest, "bswap_u16");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    /* Alternative expectation: low word byte-swapped (0x5678 -> 0x7856). */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12347856), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    /* Active expectation: the low word is cleared rather than swapped.
       NOTE(review): presumably modelling the undefined 16-bit BSWAP result - confirm vs the worker impl. */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12340000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif
    *g_pu32 = UINT32_C(0xffff1122);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff2211), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff0000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif

    RTTestSub(g_hTest, "bswap_u32");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u32(g_pu32);
    RTTEST_CHECK(g_hTest, *g_pu32 == UINT32_C(0x78563412));

    RTTestSub(g_hTest, "bswap_u64");
    *g_pu64 = UINT64_C(0x0123456789abcdef);
    iemAImpl_bswap_u64(g_pu64);
    RTTEST_CHECK(g_hTest, *g_pu64 == UINT64_C(0xefcdab8967452301));
}
2196
2197
2198
2199/*********************************************************************************************************************************
2200* Floating point (x87 style) *
2201*********************************************************************************************************************************/
2202
/** Test data for loading an FPU constant (fld1/fldl2t/.../fldz). */
typedef struct FPU_LD_CONST_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U rdResult;    /**< Expected 80-bit result value. */
} FPU_LD_CONST_TEST_T;

/** Test data for loading an 80-bit register from a 32-bit memory value. */
typedef struct FPU_R32_IN_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U rdResult;    /**< Expected 80-bit result value. */
    RTFLOAT32U InVal;       /**< The 32-bit input value. */
} FPU_R32_IN_TEST_T;

/** Test data for loading an 80-bit register from a 64-bit memory value. */
typedef struct FPU_R64_IN_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U rdResult;    /**< Expected 80-bit result value. */
    RTFLOAT64U InVal;       /**< The 64-bit input value. */
} FPU_R64_IN_TEST_T;

/** Test data for loading an 80-bit register from an 80-bit memory value. */
typedef struct FPU_R80_IN_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U rdResult;    /**< Expected 80-bit result value. */
    RTFLOAT80U InVal;       /**< The 80-bit input value. */
} FPU_R80_IN_TEST_T;

/** Test data for storing an 80-bit register to a 32-bit memory value. */
typedef struct FPU_ST_R32_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U InVal;       /**< The 80-bit input value. */
    RTFLOAT32U OutVal;      /**< Expected 32-bit output value. */
} FPU_ST_R32_TEST_T;

/** Test data for storing an 80-bit register to a 64-bit memory value. */
typedef struct FPU_ST_R64_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U InVal;       /**< The 80-bit input value. */
    RTFLOAT64U OutVal;      /**< Expected 64-bit output value. */
} FPU_ST_R64_TEST_T;

/** Test data for storing an 80-bit register to an 80-bit memory value. */
typedef struct FPU_ST_R80_TEST_T
{
    uint16_t fFcw;          /**< FPU control word input. */
    uint16_t fFswIn;        /**< FPU status word input. */
    uint16_t fFswOut;       /**< Expected FPU status word output. */
    RTFLOAT80U InVal;       /**< The 80-bit input value. */
    RTFLOAT80U OutVal;      /**< Expected 80-bit output value. */
} FPU_ST_R80_TEST_T;
2264
2265#include "tstIEMAImplDataFpuLdSt.h"
2266
2267
2268/*
2269 * FPU constant loading.
2270 */
2271
#ifndef HAVE_FPU_LOAD_CONST_TESTS
/* All-zero placeholders used when no generated FPU constant-load data is included. */
static const FPU_LD_CONST_TEST_T g_aTests_fld1[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldl2t[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldl2e[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldpi[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldlg2[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldln2[] = { {0} };
static const FPU_LD_CONST_TEST_T g_aTests_fldz[] = { {0} };
#endif
2281
/** Sub-test descriptor for an FPU constant-load worker. */
typedef struct FPU_LD_CONST_T
{
    const char *pszName;                /**< Sub-test name. */
    PFNIEMAIMPLFPUR80LDCONST pfn;       /**< Worker under test. */
    PFNIEMAIMPLFPUR80LDCONST pfnNative; /**< Native worker variant, if any. */
    FPU_LD_CONST_TEST_T const *paTests; /**< Recorded test values. */
    uint32_t cTests;                    /**< Number of recorded tests. */
    uint32_t uExtra;                    /**< Extra info (unused here). */
    uint8_t idxCpuEflFlavour;           /**< EFLAGS behavior flavour of this entry. */
} FPU_LD_CONST_T;

/** The FPU constant-load sub-tests. */
static const FPU_LD_CONST_T g_aFpuLdConst[] =
{
    ENTRY(fld1),
    ENTRY(fldl2t),
    ENTRY(fldl2e),
    ENTRY(fldpi),
    ENTRY(fldlg2),
    ENTRY(fldln2),
    ENTRY(fldz),
};
2303
2304#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Emits FPU_LD_CONST_TEST_T tables for all constant-load workers.
 *  For every randomized FCW/FSW pair all four rounding modes are recorded,
 *  hence the iTest += 4 stepping. */
static void FpuLdConstGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_FPU_LOAD_CONST_TESTS\n");
    X86FXSTATE State;
    RT_ZERO(State);
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdConst); iFn++)
    {
        RTStrmPrintf(pOut, "static const FPU_LD_CONST_TEST_T g_aTests_%s[] =\n{\n", g_aFpuLdConst[iFn].pszName);
        for (uint32_t iTest = 0; iTest < cTests; iTest += 4)
        {
            /* Randomize control and status words within their valid bit masks. */
            State.FCW = RandU16() & (X86_FCW_MASK_ALL | X86_FCW_PC_MASK);
            State.FSW = RandU16() & (X86_FSW_C_MASK | X86_FSW_XCPT_ES_MASK | X86_FSW_TOP_MASK | X86_FSW_B);

            for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
            {
                IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
                State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT);
                g_aFpuLdConst[iFn].pfn(&State, &Res);
                RTStrmPrintf(pOut, " { %#06x, %#06x, %#06x, %s }, /* #%u */\n",
                             State.FCW, State.FSW, Res.FSW, GenFormatR80(&Res.r80Result), iTest + iRounding);
            }
        }
        RTStrmPrintf(pOut, "};\n");
    }
}
2330#endif
2331
2332static void FpuLoadConstTest(void)
2333{
2334 /*
2335 * Inputs:
2336 * - FSW: C0, C1, C2, C3
2337 * - FCW: Exception masks, Precision control, Rounding control.
2338 *
2339 * C1 set to 1 on stack overflow, zero otherwise. C0, C2, and C3 are "undefined".
2340 */
2341 X86FXSTATE State;
2342 RT_ZERO(State);
2343 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdConst); iFn++)
2344 {
2345 RTTestSub(g_hTest, g_aFpuLdConst[iFn].pszName);
2346
2347 uint32_t const cTests = g_aFpuLdConst[iFn].cTests;
2348 FPU_LD_CONST_TEST_T const *paTests = g_aFpuLdConst[iFn].paTests;
2349 PFNIEMAIMPLFPUR80LDCONST pfn = g_aFpuLdConst[iFn].pfn;
2350 uint32_t const cVars = 1 + (g_aFpuLdConst[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && g_aFpuLdConst[iFn].pfnNative);
2351 for (uint32_t iVar = 0; iVar < cVars; iVar++)
2352 {
2353 for (uint32_t iTest = 0; iTest < cTests; iTest++)
2354 {
2355 State.FCW = paTests[iTest].fFcw;
2356 State.FSW = paTests[iTest].fFswIn;
2357 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
2358 pfn(&State, &Res);
2359 if ( Res.FSW != paTests[iTest].fFswOut
2360 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult))
2361 RTTestFailed(g_hTest, "#%u%s: fcw=%#06x fsw=%#06x -> fsw=%#06x %s, expected %#06x %s%s%s (%s)\n",
2362 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
2363 Res.FSW, FormatR80(&Res.r80Result),
2364 paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult),
2365 FswDiff(Res.FSW, paTests[iTest].fFswOut),
2366 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "",
2367 FormatFcw(paTests[iTest].fFcw) );
2368 }
2369 pfn = g_aFpuLdConst[iFn].pfnNative;
2370 }
2371 }
2372}
2373
2374
2375/*
2376 * Load values from memory.
2377 */
2378
#ifndef HAVE_FPU_LD_MEM
/* All-zero placeholders used when no generated FPU memory-load data is included. */
static FPU_R80_IN_TEST_T const g_aTests_fld_r80_from_r80[] = { {0} };
static FPU_R64_IN_TEST_T const g_aTests_fld_r80_from_r64[] = { {0} };
static FPU_R32_IN_TEST_T const g_aTests_fld_r80_from_r32[] = { {0} };
#endif
2384
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Defines FpuLdR&lt;a_cBits&gt;Generate(), emitting test tables for loading an
 * 80-bit register from an a_cBits-bit memory value.  All four rounding modes
 * are recorded per randomized input, hence the iTest += 4 stepping.
 */
# define GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType) \
static void FpuLdR ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    X86FXSTATE State; \
    RT_ZERO(State); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const " #a_TestType " g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest += 4) \
        { \
            State.FCW = RandU16() & (X86_FCW_MASK_ALL | X86_FCW_PC_MASK); \
            State.FSW = RandU16() & (X86_FSW_C_MASK | X86_FSW_XCPT_ES_MASK | X86_FSW_TOP_MASK | X86_FSW_B); \
            a_rdTypeIn InVal = RandR ## a_cBits ## Src(iTest); \
            \
            for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
            { \
                IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
                State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT); \
                a_aSubTests[iFn].pfn(&State, &Res, &InVal); \
                RTStrmPrintf(pOut, " { %#06x, %#06x, %#06x, %s, %s }, /* #%u */\n", \
                             State.FCW, State.FSW, Res.FSW, GenFormatR80(&Res.r80Result), \
                             GenFormatR ## a_cBits(&InVal), iTest + iRounding); \
            } \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType)
#endif
2416
/**
 * Defines the worker function pointer type, the sub-test table and
 * FpuLdR&lt;a_cBits&gt;Test() for loading an 80-bit register from an a_cBits-bit
 * memory value, plus the matching generator via GEN_FPU_LOAD.
 */
#define TEST_FPU_LOAD(a_cBits, a_rdTypeIn, a_SubTestType, a_aSubTests, a_TestType) \
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPULDR80FROM ## a_cBits,(PCX86FXSTATE, PIEMFPURESULT, PC ## a_rdTypeIn)); \
typedef FNIEMAIMPLFPULDR80FROM ## a_cBits *PFNIEMAIMPLFPULDR80FROM ## a_cBits; \
typedef struct a_SubTestType \
{ \
    const char *pszName; \
    PFNIEMAIMPLFPULDR80FROM ## a_cBits pfn, pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests; \
    uint32_t uExtra; \
    uint8_t idxCpuEflFlavour; \
} a_SubTestType; \
\
static const a_SubTestType a_aSubTests[] = \
{ \
    ENTRY(RT_CONCAT(fld_r80_from_r,a_cBits)) \
}; \
GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType) \
\
static void FpuLdR ## a_cBits ## Test(void) \
{ \
    X86FXSTATE State; \
    RT_ZERO(State); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
        PFNIEMAIMPLFPULDR80FROM ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++) \
            { \
                a_rdTypeIn const InVal = paTests[iTest].InVal; \
                State.FCW = paTests[iTest].fFcw; \
                State.FSW = paTests[iTest].fFswIn; \
                IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
                pfn(&State, &Res, &InVal); \
                if (   Res.FSW != paTests[iTest].fFswOut \
                    || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult)) \
                    RTTestFailed(g_hTest, "#%03u%s: fcw=%#06x fsw=%#06x in=%s\n" \
                                          "%s -> fsw=%#06x %s\n" \
                                          "%s expected %#06x %s%s%s (%s)\n", \
                                 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
                                 FormatR ## a_cBits(&paTests[iTest].InVal), \
                                 iVar ? "  " : "", Res.FSW, FormatR80(&Res.r80Result), \
                                 iVar ? "  " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult), \
                                 FswDiff(Res.FSW, paTests[iTest].fFswOut), \
                                 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "", \
                                 FormatFcw(paTests[iTest].fFcw) ); \
            } \
            pfn = a_aSubTests[iFn].pfnNative; /* second pass exercises the native worker */ \
        } \
    } \
}

TEST_FPU_LOAD(80, RTFLOAT80U, FPU_LD_R80_T, g_aFpuLdR80, FPU_R80_IN_TEST_T)
TEST_FPU_LOAD(64, RTFLOAT64U, FPU_LD_R64_T, g_aFpuLdR64, FPU_R64_IN_TEST_T)
TEST_FPU_LOAD(32, RTFLOAT32U, FPU_LD_R32_T, g_aFpuLdR32, FPU_R32_IN_TEST_T)
2478
2479#ifdef TSTIEMAIMPL_WITH_GENERATOR
2480static void FpuLdMemGenerate(PRTSTREAM pOut, uint32_t cTests)
2481{
2482 RTStrmPrintf(pOut, "\n\n#define HAVE_FPU_LD_MEM\n");
2483 FpuLdR80Generate(pOut, cTests);
2484 FpuLdR64Generate(pOut, cTests);
2485 FpuLdR32Generate(pOut, cTests);
2486}
2487#endif
2488
2489static void FpuLdMemTest(void)
2490{
2491 FpuLdR80Test();
2492 FpuLdR64Test();
2493 FpuLdR32Test();
2494}
2495
2496
2497/*
2498 * Store values to memory.
2499 */
2500
#ifndef HAVE_FPU_ST_MEM
/* All-zero placeholders used when no generated FPU memory-store data is included. */
static FPU_ST_R80_TEST_T const g_aTests_fst_r80_to_r80[] = { {0} };
static FPU_ST_R64_TEST_T const g_aTests_fst_r80_to_r64[] = { {0} };
static FPU_ST_R32_TEST_T const g_aTests_fst_r80_to_r32[] = { {0} };
#endif
2506
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Defines FpuStR&lt;a_cBits&gt;Generate(), emitting test tables for storing an
 * 80-bit register value to an a_cBits-bit memory value.  Each randomized
 * input is run through all four rounding modes and a sweep of the
 * OM/UM/PM exception-mask combinations (iMask).
 */
# define GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType) \
static void FpuStR ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    X86FXSTATE State; \
    RT_ZERO(State); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const " #a_TestType " g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest += 1) \
        { \
            uint16_t const fFcw = RandU16() & (X86_FCW_MASK_ALL | X86_FCW_PC_MASK); \
            State.FSW = RandU16() & (X86_FSW_C_MASK | X86_FSW_XCPT_ES_MASK | X86_FSW_TOP_MASK | X86_FSW_B); \
            RTFLOAT80U const InVal = RandR80Src(iTest); \
            \
            for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
            { \
                /* PC doesn't influence these, so leave as is. */ \
                AssertCompile(X86_FCW_OM_BIT + 1 == X86_FCW_UM_BIT && X86_FCW_UM_BIT + 1 == X86_FCW_PM_BIT); \
                for (uint16_t iMask = 0; iMask < 16; iMask += 2 /*1*/) \
                { \
                    uint16_t uFswOut = 0; \
                    a_rdType OutVal; \
                    RT_ZERO(OutVal); \
                    memset(&OutVal, 0xfe, sizeof(OutVal)); /* poison so untouched bytes show up */ \
                    State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_UM | X86_FCW_PM)) \
                              | (iRounding  << X86_FCW_RC_SHIFT); \
                    /*if (iMask & 1) State.FCW ^= X86_FCW_MASK_ALL;*/ \
                    State.FCW |= (iMask >> 1) << X86_FCW_OM_BIT; \
                    a_aSubTests[iFn].pfn(&State, &uFswOut, &OutVal, &InVal); \
                    RTStrmPrintf(pOut, " { %#06x, %#06x, %#06x, %s, %s }, /* #%u/%u/%u */\n", \
                                 State.FCW, State.FSW, uFswOut, GenFormatR80(&InVal), \
                                 GenFormatR ## a_cBits(&OutVal), iTest, iRounding, iMask); \
                } \
            } \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType)
#endif
2549
/**
 * Instantiates the test infrastructure for an FPU store instruction that
 * writes an 80-bit register value to an a_cBits-bit memory operand
 * (fst_r80_to_rNN).
 *
 * For each width this expands to:
 *  - a function pointer typedef for the worker
 *    (FCW/FSW in, FSW out, destination float, source 80-bit float),
 *  - the per-function sub-test descriptor struct a_SubTestType,
 *  - the a_aSubTests table with the single fst_r80_to_rNN entry,
 *  - the test-data generator via GEN_FPU_STORE (no-op unless
 *    TSTIEMAIMPL_WITH_GENERATOR is defined),
 *  - the FpuStRNNTest() driver that replays the pre-generated test vectors
 *    and compares both the resulting FSW and the stored value.
 *
 * @param   a_cBits         Destination width in bits (32, 64 or 80).
 * @param   a_rdType        Destination IPRT float type (e.g. RTFLOAT64U).
 * @param   a_SubTestType   Name for the generated sub-test descriptor struct.
 * @param   a_aSubTests     Name for the generated sub-test table.
 * @param   a_TestType      Test vector record type (from the generated data).
 */
#define TEST_FPU_STORE(a_cBits, a_rdType, a_SubTestType, a_aSubTests, a_TestType) \
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOR ## a_cBits,(PCX86FXSTATE, uint16_t *, \
                                                                   PRTFLOAT ## a_cBits ## U, PCRTFLOAT80U)); \
typedef FNIEMAIMPLFPUSTR80TOR ## a_cBits *PFNIEMAIMPLFPUSTR80TOR ## a_cBits; \
typedef struct a_SubTestType \
{ \
    const char                         *pszName; \
    PFNIEMAIMPLFPUSTR80TOR ## a_cBits   pfn, pfnNative; \
    a_TestType const                   *paTests; \
    uint32_t                            cTests; \
    uint32_t                            uExtra; \
    uint8_t                             idxCpuEflFlavour; \
} a_SubTestType; \
\
static const a_SubTestType a_aSubTests[] = \
{ \
    ENTRY(RT_CONCAT(fst_r80_to_r,a_cBits)) \
}; \
GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType) \
\
static void FpuStR ## a_cBits ## Test(void) \
{ \
    X86FXSTATE State; \
    RT_ZERO(State); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
        PFNIEMAIMPLFPUSTR80TOR ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        /* Run twice when a native variant matching the host flavour exists. */ \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++) \
            { \
                RTFLOAT80U const InVal = paTests[iTest].InVal; \
                uint16_t uFswOut = 0; \
                a_rdType OutVal; \
                RT_ZERO(OutVal); \
                /* Poison the output so an un-written destination is caught. */ \
                memset(&OutVal, 0xfe, sizeof(OutVal)); \
                State.FCW = paTests[iTest].fFcw; \
                State.FSW = paTests[iTest].fFswIn; \
                pfn(&State, &uFswOut, &OutVal, &InVal); \
                if (   uFswOut != paTests[iTest].fFswOut \
                    || !RTFLOAT ## a_cBits ## U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal)) \
                    RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n" \
                                          "%s -> fsw=%#06x %s\n" \
                                          "%s expected %#06x %s%s%s (%s)\n", \
                                 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
                                 FormatR80(&paTests[iTest].InVal), \
                                 iVar ? " " : "", uFswOut, FormatR ## a_cBits(&OutVal), \
                                 iVar ? " " : "", paTests[iTest].fFswOut, FormatR ## a_cBits(&paTests[iTest].OutVal), \
                                 FswDiff(uFswOut, paTests[iTest].fFswOut), \
                                 !RTFLOAT ## a_cBits ## U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal) ? " - val" : "", \
                                 FormatFcw(paTests[iTest].fFcw) ); \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
2611
/* Instantiate the store-to-memory test machinery for each destination
   width: 80-, 64- and 32-bit floating point. */
TEST_FPU_STORE(80, RTFLOAT80U, FPU_ST_R80_T, g_aFpuStR80, FPU_ST_R80_TEST_T)
TEST_FPU_STORE(64, RTFLOAT64U, FPU_ST_R64_T, g_aFpuStR64, FPU_ST_R64_TEST_T)
TEST_FPU_STORE(32, RTFLOAT32U, FPU_ST_R32_T, g_aFpuStR32, FPU_ST_R32_TEST_T)
2615
2616#ifdef TSTIEMAIMPL_WITH_GENERATOR
2617static void FpuStMemGenerate(PRTSTREAM pOut, uint32_t cTests)
2618{
2619 RTStrmPrintf(pOut, "\n\n#define HAVE_FPU_ST_MEM\n");
2620 FpuStR80Generate(pOut, cTests);
2621 FpuStR64Generate(pOut, cTests);
2622 FpuStR32Generate(pOut, cTests);
2623}
2624#endif
2625
2626static void FpuStMemTest(void)
2627{
2628 FpuStR80Test();
2629 FpuStR64Test();
2630 FpuStR32Test();
2631}
2632
2633
2634int main(int argc, char **argv)
2635{
2636 int rc = RTR3InitExe(argc, &argv, 0);
2637 if (RT_FAILURE(rc))
2638 return RTMsgInitFailure(rc);
2639
2640 /*
2641 * Determin the host CPU.
2642 * If not using the IEMAllAImpl.asm code, this will be set to Intel.
2643 */
2644#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
2645 g_idxCpuEflFlavour = ASMIsAmdCpu() || ASMIsHygonCpu()
2646 ? IEMTARGETCPU_EFL_BEHAVIOR_AMD
2647 : IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
2648#else
2649 g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
2650#endif
2651
2652 /*
2653 * Parse arguments.
2654 */
2655 enum { kModeNotSet, kModeTest, kModeGenerate }
2656 enmMode = kModeNotSet;
2657 bool fInt = true;
2658 bool fFpuLdSt = true;
2659 bool fFpuOther = true;
2660 bool fCpuData = true;
2661 bool fCommonData = true;
2662 uint32_t const cDefaultTests = 96;
2663 uint32_t cTests = cDefaultTests;
2664 RTGETOPTDEF const s_aOptions[] =
2665 {
2666 // mode:
2667 { "--generate", 'g', RTGETOPT_REQ_NOTHING },
2668 { "--test", 't', RTGETOPT_REQ_NOTHING },
2669 // test selection (both)
2670 { "--all", 'a', RTGETOPT_REQ_NOTHING },
2671 { "--none", 'z', RTGETOPT_REQ_NOTHING },
2672 { "--zap", 'z', RTGETOPT_REQ_NOTHING },
2673 { "--fpu-ld-st", 'f', RTGETOPT_REQ_NOTHING },
2674 { "--fpu-load-store", 'f', RTGETOPT_REQ_NOTHING },
2675 { "--fpu-other", 'F', RTGETOPT_REQ_NOTHING },
2676 { "--int", 'i', RTGETOPT_REQ_NOTHING },
2677 // generation parameters
2678 { "--common", 'm', RTGETOPT_REQ_NOTHING },
2679 { "--cpu", 'c', RTGETOPT_REQ_NOTHING },
2680 { "--number-of-tests", 'n', RTGETOPT_REQ_UINT32 },
2681 };
2682
2683 RTGETOPTSTATE State;
2684 rc = RTGetOptInit(&State, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0);
2685 AssertRCReturn(rc, RTEXITCODE_FAILURE);
2686
2687 RTGETOPTUNION ValueUnion;
2688 while ((rc = RTGetOpt(&State, &ValueUnion)))
2689 {
2690 switch (rc)
2691 {
2692 case 'g':
2693 enmMode = kModeGenerate;
2694 break;
2695 case 't':
2696 enmMode = kModeTest;
2697 break;
2698 case 'a':
2699 fCpuData = true;
2700 fCommonData = true;
2701 fInt = true;
2702 fFpuLdSt = true;
2703 fFpuOther = true;
2704 break;
2705 case 'z':
2706 fCpuData = false;
2707 fCommonData = false;
2708 fInt = false;
2709 fFpuLdSt = false;
2710 fFpuOther = false;
2711 break;
2712 case 'f':
2713 fFpuLdSt = true;
2714 break;
2715 case 'F':
2716 fFpuOther = true;
2717 break;
2718 case 'i':
2719 fInt = true;
2720 break;
2721 case 'm':
2722 fCommonData = true;
2723 break;
2724 case 'c':
2725 fCpuData = true;
2726 break;
2727 case 'n':
2728 cTests = ValueUnion.u32;
2729 break;
2730 case 'h':
2731 RTPrintf("usage: %s <-g|-t> [options]\n"
2732 "\n"
2733 "Mode:\n"
2734 " -g, --generate\n"
2735 " Generate test data.\n"
2736 " -t, --test\n"
2737 " Execute tests.\n"
2738 "\n"
2739 "Test selection (both modes):\n"
2740 " -a, --all\n"
2741 " Enable all tests and generated test data. (default)\n"
2742 " -z, --zap, --none\n"
2743 " Disable all tests and test data types.\n"
2744 " -i, --int\n"
2745 " Enable non-FPU tests.\n"
2746 " -f, --fpu-ld-st\n"
2747 " Enable FPU load and store tests.\n"
2748 " -f, --fpu-other\n"
2749 " Enable other FPU tests.\n"
2750 "\n"
2751 "Generation:\n"
2752 " -m, --common\n"
2753 " Enable generating common test data.\n"
2754 " -c, --only-cpu\n"
2755 " Enable generating CPU specific test data.\n"
2756 " -n, --number-of-test <count>\n"
2757 " Number of tests to generate. Default: %u\n"
2758 , argv[0], cDefaultTests);
2759 return RTEXITCODE_SUCCESS;
2760 default:
2761 return RTGetOptPrintError(rc, &ValueUnion);
2762 }
2763 }
2764
2765 /*
2766 * Generate data?
2767 */
2768 if (enmMode == kModeGenerate)
2769 {
2770#ifdef TSTIEMAIMPL_WITH_GENERATOR
2771 char szCpuDesc[256] = {0};
2772 RTMpGetDescription(NIL_RTCPUID, szCpuDesc, sizeof(szCpuDesc));
2773 const char * const pszCpuType = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "Amd" : "Intel";
2774 const char * const pszCpuSuff = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_Amd" : "_Intel";
2775 const char * const pszCpuSuffU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_AMD" : "_INTEL";
2776# if defined(RT_OS_WINDOWS) || defined(RT_OS_OS2)
2777 const char * const pszBitBucket = "NUL";
2778# else
2779 const char * const pszBitBucket = "/dev/null";
2780# endif
2781
2782 if (cTests == 0)
2783 cTests = cDefaultTests;
2784 g_cZeroDstTests = RT_MIN(cTests / 16, 32);
2785 g_cZeroSrcTests = g_cZeroDstTests * 2;
2786
2787 if (fInt)
2788 {
2789 const char *pszDataFile = fCommonData ? "tstIEMAImplData.h" : pszBitBucket;
2790 PRTSTREAM pStrmData = NULL;
2791 rc = RTStrmOpen(pszDataFile, "w", &pStrmData);
2792 if (!pStrmData)
2793 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataFile, rc);
2794
2795 const char *pszDataCpuFile = !fCpuData ? pszBitBucket : g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD
2796 ? "tstIEMAImplData-Amd.h" : "tstIEMAImplData-Intel.h";
2797 PRTSTREAM pStrmDataCpu = NULL;
2798 rc = RTStrmOpen(pszDataCpuFile, "w", &pStrmDataCpu);
2799 if (!pStrmData)
2800 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataCpuFile, rc);
2801
2802 GenerateHeader(pStrmData, "", szCpuDesc, NULL, "");
2803 GenerateHeader(pStrmDataCpu, "", szCpuDesc, pszCpuType, pszCpuSuff);
2804
2805 BinU8Generate( pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
2806 BinU16Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
2807 BinU32Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
2808 BinU64Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
2809 ShiftDblGenerate(pStrmDataCpu, pszCpuSuffU, RT_MAX(cTests, 128));
2810 UnaryGenerate(pStrmData, cTests);
2811 ShiftGenerate(pStrmDataCpu, pszCpuSuffU, cTests);
2812 MulDivGenerate(pStrmDataCpu, pszCpuSuffU, cTests);
2813
2814 RTEXITCODE rcExit = GenerateFooterAndClose(pStrmDataCpu, pszDataCpuFile, "", pszCpuSuff,
2815 GenerateFooterAndClose(pStrmData, pszDataFile, "", "",
2816 RTEXITCODE_SUCCESS));
2817 if (rcExit != RTEXITCODE_SUCCESS)
2818 return rcExit;
2819 }
2820
2821 if (fFpuLdSt)
2822 {
2823 const char *pszDataFile = fCommonData ? "tstIEMAImplDataFpuLdSt.h" : pszBitBucket;
2824 PRTSTREAM pStrmData = NULL;
2825 rc = RTStrmOpen(pszDataFile, "w", &pStrmData);
2826 if (!pStrmData)
2827 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataFile, rc);
2828
2829 const char *pszDataCpuFile = !fCpuData ? pszBitBucket : g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD
2830 ? "tstIEMAImplDataFpuLdSt-Amd.h" : "tstIEMAImplDataFpuLdSt-Intel.h";
2831 PRTSTREAM pStrmDataCpu = NULL;
2832 rc = RTStrmOpen(pszDataCpuFile, "w", &pStrmDataCpu);
2833 if (!pStrmData)
2834 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataCpuFile, rc);
2835
2836 GenerateHeader(pStrmData, "Fpu", szCpuDesc, NULL, "");
2837 GenerateHeader(pStrmDataCpu, "Fpu", szCpuDesc, pszCpuType, pszCpuSuff);
2838
2839 FpuLdConstGenerate(pStrmData, cTests);
2840 cTests = RT_MAX(cTests, 384); /* need better coverage for the next ones. */
2841 FpuLdMemGenerate(pStrmData, cTests);
2842 FpuStMemGenerate(pStrmData, cTests);
2843
2844 RTEXITCODE rcExit = GenerateFooterAndClose(pStrmDataCpu, pszDataCpuFile, "Fpu", pszCpuSuff,
2845 GenerateFooterAndClose(pStrmData, pszDataFile, "Fpu", "",
2846 RTEXITCODE_SUCCESS));
2847 if (rcExit != RTEXITCODE_SUCCESS)
2848 return rcExit;
2849 }
2850
2851 if (fFpuOther)
2852 {
2853# if 0
2854 const char *pszDataFile = fCommonData ? "tstIEMAImplDataFpuOther.h" : pszBitBucket;
2855 PRTSTREAM pStrmData = NULL;
2856 rc = RTStrmOpen(pszDataFile, "w", &pStrmData);
2857 if (!pStrmData)
2858 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataFile, rc);
2859
2860 const char *pszDataCpuFile = !fCpuData ? pszBitBucket : g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD
2861 ? "tstIEMAImplDataFpuOther-Amd.h" : "tstIEMAImplDataFpuOther-Intel.h";
2862 PRTSTREAM pStrmDataCpu = NULL;
2863 rc = RTStrmOpen(pszDataCpuFile, "w", &pStrmDataCpu);
2864 if (!pStrmData)
2865 return RTMsgErrorExitFailure("Failed to open %s for writing: %Rrc", pszDataCpuFile, rc);
2866
2867 GenerateHeader(pStrmData, "Fpu", szCpuDesc, NULL, "");
2868 GenerateHeader(pStrmDataCpu, "Fpu", szCpuDesc, pszCpuType, pszCpuSuff);
2869
2870 /* later */
2871
2872 RTEXITCODE rcExit = GenerateFooterAndClose(pStrmDataCpu, pszDataCpuFile, "Fpu", pszCpuSuff,
2873 GenerateFooterAndClose(pStrmData, pszDataFile, "Fpu", "",
2874 RTEXITCODE_SUCCESS));
2875 if (rcExit != RTEXITCODE_SUCCESS)
2876 return rcExit;
2877# endif
2878 }
2879
2880 return RTEXITCODE_SUCCESS;
2881#else
2882 return RTMsgErrorExitFailure("Test data generator not compiled in!");
2883#endif
2884 }
2885
2886 /*
2887 * Do testing. Currrently disabled by default as data needs to be checked
2888 * on both intel and AMD systems first.
2889 */
2890 rc = RTTestCreate("tstIEMAimpl", &g_hTest);
2891 AssertRCReturn(rc, RTEXITCODE_FAILURE);
2892 if (enmMode == kModeTest)
2893 {
2894 RTTestBanner(g_hTest);
2895
2896 /* Allocate guarded memory for use in the tests. */
2897#define ALLOC_GUARDED_VAR(a_puVar) do { \
2898 rc = RTTestGuardedAlloc(g_hTest, sizeof(*a_puVar), sizeof(*a_puVar), false /*fHead*/, (void **)&a_puVar); \
2899 if (RT_FAILURE(rc)) RTTestFailed(g_hTest, "Failed to allocate guarded mem: " #a_puVar); \
2900 } while (0)
2901 ALLOC_GUARDED_VAR(g_pu8);
2902 ALLOC_GUARDED_VAR(g_pu16);
2903 ALLOC_GUARDED_VAR(g_pu32);
2904 ALLOC_GUARDED_VAR(g_pu64);
2905 ALLOC_GUARDED_VAR(g_pu128);
2906 ALLOC_GUARDED_VAR(g_pu8Two);
2907 ALLOC_GUARDED_VAR(g_pu16Two);
2908 ALLOC_GUARDED_VAR(g_pu32Two);
2909 ALLOC_GUARDED_VAR(g_pu64Two);
2910 ALLOC_GUARDED_VAR(g_pu128Two);
2911 ALLOC_GUARDED_VAR(g_pfEfl);
2912 if (RTTestErrorCount(g_hTest) == 0)
2913 {
2914 if (fInt)
2915 {
2916 BinU8Test();
2917 BinU16Test();
2918 BinU32Test();
2919 BinU64Test();
2920 XchgTest();
2921 XaddTest();
2922 CmpXchgTest();
2923 CmpXchg8bTest();
2924 CmpXchg16bTest();
2925 ShiftDblTest();
2926 UnaryTest();
2927 ShiftTest();
2928 MulDivTest();
2929 BswapTest();
2930 }
2931
2932 if (fFpuLdSt)
2933 {
2934 FpuLoadConstTest();
2935 FpuLdMemTest();
2936 FpuStMemTest();
2937 }
2938 }
2939 return RTTestSummaryAndDestroy(g_hTest);
2940 }
2941 return RTTestSkipAndDestroy(g_hTest, "unfinished testcase");
2942}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette