Changeset 104238 in vbox
- Timestamp: Apr 8, 2024 8:15:10 PM (8 months ago)
- Location: trunk
- Files: 7 edited
Legend: lines prefixed with '+' were added, '-' removed; all other lines are unmodified context. Hunks within a file are separated by '…'.
trunk/include/iprt/asmdefs-arm.h
--- trunk/include/iprt/asmdefs-arm.h    (r104230)
+++ trunk/include/iprt/asmdefs-arm.h    (r104238)

 #endif

+#include <iprt/cdefs.h>
+
+
 #if !defined(RT_ARCH_ARM64) && !defined(RT_ARCH_ARM32)
 # error "Not on ARM64 or ARM32"
…
  * @{
  */
+
+/**
+ * Align code, pad with BRK. */
+#define ALIGNCODE(alignment)    .balignl alignment, 0xd42000cc
+
+/**
+ * Align data, pad with ZEROs. */
+#define ALIGNDATA(alignment)    .balign alignment
+
+/**
+ * Align BSS, pad with ZEROs. */
+#define ALIGNBSS(alignment)     .balign alignment
+

 /** Marks the beginning of a code section. */
…
  */
 .macro BEGINPROC, a_Name
-        .p2align        2
-        .globl          NAME(\a_Name)
 NAME(\a_Name):
 .endm
…
  */
 .macro BEGINPROC_HIDDEN, a_Name
-        .p2align        2
 #ifndef ASM_FORMAT_ELF
-        .private_extern NAME(\a_Name)
-#else
-        .hidden         NAME(\a_Name)
-#endif
-        .globl          NAME(\a_Name)
+        .private_extern NAME(\a_Name)
+#else
+        .hidden         NAME(\a_Name)
+#endif
+        .globl          NAME(\a_Name)
 NAME(\a_Name):
 .endm
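The fill word is the point of the new ALIGNCODE macro: `.balignl alignment, 0xd42000cc` pads out to the requested boundary with 32-bit words of 0xd42000cc (a BRK-style trap pattern, per the header comment), so control flow straying into inter-function padding faults instead of silently executing. A rough C model of that fill behaviour (PadCode and its parameters are hypothetical, assuming cbAlign is a power of two and a multiple of 4):

#include <stdint.h>
#include <stddef.h>

/* Model of the assembler's .balignl fill: append 0xd42000cc words until
   offByte reaches the next cbAlign boundary; returns the number added. */
static size_t PadCode(uint32_t *paDst, size_t offByte, size_t cbAlign)
{
    size_t cWordsAdded = 0;
    while (offByte & (cbAlign - 1))             /* not yet on a boundary */
    {
        paDst[offByte / sizeof(uint32_t)] = UINT32_C(0xd42000cc);
        offByte += sizeof(uint32_t);
        cWordsAdded++;
    }
    return cWordsAdded;
}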
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl-arm64.S
--- trunk/src/VBox/VMM/VMMAll/IEMAllAImpl-arm64.S       (r104231)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllAImpl-arm64.S       (r104238)

 #include <iprt/asmdefs-arm.h>
 #include <iprt/x86.h>
+
+#define IEM_AIMPL_FUNCTION_ALIGNMENT    0x20


…
 BEGINCODE

-BEGINPROC_HIDDEN iemAImpl_placeholder
-        brk #1
-        ret


 /* Some sketches.
…
 /* uint32_t iemAImpl_cmp_u8(uint32_t fEFlags, uint8_t const *puDst, uint8_t uSrc); */
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN iemAImpl_sub_u8
         .cfi_startproc
…
 /* uint32_t iemAImpl_cmp_u16(uint32_t fEFlags, uint16_t const *puDst, uint16_t uSrc); */
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN iemAImpl_sub_u16
         .cfi_startproc
…
 /* uint32_t iemAImpl_cmp_u32(uint32_t fEFlags, uint32_t const *puDst, uint32_t uSrc); */
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN iemAImpl_sub_u32
         .cfi_startproc
…
 /* uint32_t iemAImpl_cmp_u64(uint32_t fEFlags, uint64_t const *puDst, uint64_t uSrc); */
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN iemAImpl_sub_u64
         .cfi_startproc
…
 */

-/* void iemAImpl_shl_u8(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_shl_u16(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_shl_u32(uint16_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags); */
+/* uint32_t iemAImpl_shl_u8( uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_shl_u16(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_shl_u32(uint32_t fEFlagsIn, uint16_t *pu32Dst, uint8_t cShift); */
 .macro SHL_8_16_32, a_Name, a_cBits, a_fIntelFlags, a_LdStSuff
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        and     w1, w1, #0x1f
-        cbz     w1, 99f
+        and     w2, w2, #0x1f
+        cbz     w2, 99f

        /*
         * Do the shifting
         */
-        ldr\a_LdStSuff  w8, [x0]
+        ldr\a_LdStSuff  w8, [x1]
 .ifne \a_cBits < 32
-        lslv    w9, w8, w1
+        lslv    w9, w8, w2
 .else
-        lslv    x9, x8, x1          /* use 64-bit registers here so we get CF for free. We know x1 != 0. */
+        lslv    x9, x8, x2          /* use 64-bit registers here so we get CF for free. We know x1 != 0. */
 .endif
-        str\a_LdStSuff  w9, [x0]
+        str\a_LdStSuff  w9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
-
-        CALC_EFLAGS_PARITY w10, w9, w12
+        CALC_EFLAGS_PARITY w0, w9, w12

 .ifne \a_cBits < 32
…
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
 #else
         cset    x11, eq
-        bfi     w10, w11, X86_EFL_ZF_BIT, 1
+        bfi     w0, w11, X86_EFL_ZF_BIT, 1
         cset    x12, pl
-        bfi     w10, w12, X86_EFL_SF_BIT, 1
+        bfi     w0, w12, X86_EFL_SF_BIT, 1
 #endif

 .ifne \a_cBits < 32
-        bfxil   w10, w9, #\a_cBits, #1      /* w9 bit 8/16 contains carry. (X86_EFL_CF_BIT == 0) */
+        bfxil   w0, w9, #\a_cBits, #1       /* w9 bit 8/16 contains carry. (X86_EFL_CF_BIT == 0) */
 .else
-        bfxil   x10, x9, #\a_cBits, #1      /* x9 bit 32 contains carry. (X86_EFL_CF_BIT == 0) */
+        bfxil   x0, x9, #\a_cBits, #1       /* x9 bit 32 contains carry. (X86_EFL_CF_BIT == 0) */
 .endif

 .ifne \a_fIntelFlags
         eor     w11, w8, w8, LSL #1
         lsr     w11, w11, #(\a_cBits - 1)
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-
-        and     w10, w10, ~X86_EFL_AF       /* AF is cleared */
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+
+        and     w0, w0, ~X86_EFL_AF         /* AF is cleared */
 .else
        /* AMD: OF = last bit shifted: fEfl |= ((uResult >> (cOpBits - 1)) ^ fCarry) << X86_EFL_OF_BIT; */
…
         lsr     x11, x11, #(\a_cBits - 1)
 .endif
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
-.endif
-
-        str     w10, [x2]
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
+.endif
+
 99:
         ret
…

 /** @todo this is slightly slower than the C version (release) on an M2. Investigate why. */
-/* void iemAImpl_shl_u64(uint16_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags); */
+/* uint32_t iemAImpl_shl_u64(uint32_t fEFlagsIn, uint16_t *pu64Dst, uint8_t cShift); */
 .macro SHL_64, a_Name, a_fIntelFlags
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        and     w1, w1, #0x3f
-        cbz     w1, 99f
+        and     w2, w2, #0x3f
+        cbz     w2, 99f

        /*
         * Do the shifting
         */
-        ldr     x8, [x0]
-        lslv    x9, x8, x1
-        str     x9, [x0]
+        ldr     x8, [x1]
+        lslv    x9, x8, x2
+        str     x9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
-
-        CALC_EFLAGS_PARITY w10, w9, w11
+        CALC_EFLAGS_PARITY w0, w9, w11

         ands    xzr, x9, x9         /* Sets NZ */
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
-
-        neg     w11, w1             /* the shift count is MODed by the data size, so this is safe. */
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
+
+        neg     w11, w2             /* the shift count is MODed by the data size, so this is safe. */
         lsrv    x11, x8, x11
-        bfi     w10, w11, X86_EFL_CF_BIT, 1
+        bfi     w0, w11, X86_EFL_CF_BIT, 1

 .ifne \a_fIntelFlags
…
         eor     x11, x8, x8, LSL #1
         lsr     x11, x11, #63
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-
-        and     w10, w10, ~X86_EFL_AF       /* AF is cleared */
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+
+        and     w0, w0, ~X86_EFL_AF         /* AF is cleared */
 .else
        /* AMD: OF = last bit shifted: fEfl |= ((uResult >> (cOpBits - 1)) ^ fCarry) << X86_EFL_OF_BIT; */
         eor     x11, x11, x9, LSR #63       /* w11[0]=CF from above */
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
-.endif
-        str     w10, [x2]
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
+.endif
 99:
         ret
…
  */

-/* void iemAImpl_shr_u8(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_shr_u16(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_shr_u32(uint16_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags); */
+/* uint32_t iemAImpl_shr_u8( uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_shr_u16(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_shr_u32(uint32_t fEFlagsIn, uint16_t *pu32Dst, uint8_t cShift); */
 .macro shr_8_16_32, a_Name, a_cBits, a_fIntelFlags, a_LdStSuff
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        and     w1, w1, #0x1f
-        cbz     w1, 99f
-
-        /* Load EFLAGS before we start the calculation. */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
+        and     w2, w2, #0x1f
+        cbz     w2, 99f

        /*
         * Do the shifting.
         */
-        ldr\a_LdStSuff  w8, [x0]
-        lsrv    w9, w8, w1
-        str\a_LdStSuff  w9, [x0]
+        ldr\a_LdStSuff  w8, [x1]
+        lsrv    w9, w8, w2
+        str\a_LdStSuff  w9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        sub     w11, w1, #1
+        sub     w11, w2, #1
         lsrv    w11, w8, w11
-        bfxil   w10, w11, #X86_EFL_CF_BIT, #1
+        bfxil   w0, w11, #X86_EFL_CF_BIT, #1

 .ifne \a_fIntelFlags
-        and     w10, w10, ~X86_EFL_AF       /* AF is cleared */
+        and     w0, w0, ~X86_EFL_AF         /* AF is cleared */
        /* Intel: OF = one bit shift: fEfl |= X86_EFL_GET_OF_ ## cOpBits(uDstIn); */
         lsr     w11, w8, #(\a_cBits - 1)
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-.else
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.else
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
        /* AMD: OF = last bits shifted: fEfl |= (uResult >> (cOpBits - 2)) << X86_EFL_OF_BIT; */
         lsr     w11, w9, #(\a_cBits - 2)
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-.endif
-
-        CALC_EFLAGS_PARITY w10, w9, w11
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.endif
+
+        CALC_EFLAGS_PARITY w0, w9, w11

 .ifne \a_cBits < 32
…
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
-
-        str     w10, [x2]
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
+
 99:
         ret
…
 /** @todo this is slightly slower than the C version (release) on an M2. Investigate why. */
 /* void iemAImpl_shr_u64(uint16_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags); */
 .macro shr_64, a_Name, a_fIntelFlags
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        ands    w1, w1, #0x3f
+        ands    w2, w2, #0x3f
         b.eq    99f

-        /* Load EFLAGS before we start the calculation. */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
-
        /*
         * Do the shifting
         */
-        ldr     x8, [x0]
-        lsrv    x9, x8, x1
-        str     x9, [x0]
+        ldr     x8, [x1]
+        lsrv    x9, x8, x2
+        str     x9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        sub     w11, w1, #1
+        sub     w11, w2, #1
         lsrv    x11, x8, x11
-        bfxil   w10, w11, #X86_EFL_CF_BIT, #1
+        bfxil   w0, w11, #X86_EFL_CF_BIT, #1

 .ifne \a_fIntelFlags
-        and     w10, w10, ~X86_EFL_AF       /* AF is cleared */
+        and     w0, w0, ~X86_EFL_AF         /* AF is cleared */
        /* Intel: OF = one bit shift: fEfl |= X86_EFL_GET_OF_ ## cOpBits(uDstIn); */
         lsr     x11, x8, #63
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-.else
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.else
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
        /* AMD: OF = last bits shifted: fEfl |= (uResult >> (cOpBits - 2)) << X86_EFL_OF_BIT; */
         lsr     x11, x9, #62
-        bfi     w10, w11, #X86_EFL_OF_BIT, #1
-.endif
-
-        CALC_EFLAGS_PARITY w10, w9, w11
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.endif
+
+        CALC_EFLAGS_PARITY w0, w9, w11

         ands    xzr, x9, x9         /* Sets NZ */
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
-
-        str     w10, [x2]
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
+
 99:
         ret
…
  */

-/* void iemAImpl_sar_u8(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_sar_u16(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags); */
-/* void iemAImpl_sar_u32(uint16_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags); */
+/* uint32_t iemAImpl_sar_u8( uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_sar_u16(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_sar_u32(uint32_t fEFlagsIn, uint16_t *pu32Dst, uint8_t cShift); */
 .macro sar_8_16_32, a_Name, a_cBits, a_fIntelFlags, a_LdSuff, a_StSuff
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        and     w1, w1, #0x1f
-        cbz     w1, 99f
-
-        /* Load EFLAGS before we start the calculation. */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
+        and     w2, w2, #0x1f
+        cbz     w2, 99f

        /*
         * Do the shifting.
         */
-        ldr\a_LdSuff    w8, [x0]    /* Sign-extending for 8 and 16 bits! */
-        asrv    w9, w8, w1
-        str\a_StSuff    w9, [x0]
+        ldr\a_LdSuff    w8, [x1]    /* Sign-extending for 8 and 16 bits! */
+        asrv    w9, w8, w2
+        str\a_StSuff    w9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        sub     w11, w1, #1
+        sub     w11, w2, #1
         lsrv    w11, w8, w11
-        bfxil   w10, w11, #X86_EFL_CF_BIT, #1
+        bfxil   w0, w11, #X86_EFL_CF_BIT, #1

 .ifne \a_fIntelFlags
         mov     w11, ~(X86_EFL_AF | X86_EFL_OF)
-        and     w10, w10, w11       /* AF and OF are cleared */
-.else
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
-        and     w10, w10, ~X86_EFL_OF       /* OF is cleared */
-.endif
-
-        CALC_EFLAGS_PARITY w10, w9, w11
+        and     w0, w0, w11         /* AF and OF are cleared */
+.else
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
+        and     w0, w0, ~X86_EFL_OF         /* OF is cleared */
+.endif
+
+        CALC_EFLAGS_PARITY w0, w9, w11

 .ifne \a_cBits < 32
…
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
-
-        str     w10, [x2]
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
+
 99:
         ret
…

 /** @todo this is slightly slower than the C version (release) on an M2. Investigate why. */
-/* void iemAImpl_sar_u64(uint16_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags); */
+/* uint32_t iemAImpl_sar_u64(uint32_t fEFlagsIn, uint16_t *pu64Dst, uint8_t cShift); */
 .macro sar_64, a_Name, a_fIntelFlags
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
 BEGINPROC_HIDDEN \a_Name
         .cfi_startproc

        /* Do we need to shift anything at all? */
-        ands    w1, w1, #0x3f
+        ands    w2, w2, #0x3f
         b.eq    99f

-        /* Load EFLAGS before we start the calculation. */
-        ldr     w10, [x2]           /* w10 = eflags; CF=0 PF=2 AF=4 ZF=6 SF=7 OF=11 */
-
        /*
         * Do the shifting
         */
-        ldr     x8, [x0]
-        asrv    x9, x8, x1
-        str     x9, [x0]
+        ldr     x8, [x1]
+        asrv    x9, x8, x2
+        str     x9, [x1]

        /*
         * Calculate EFLAGS.
         */
-        sub     w11, w1, #1
+        sub     w11, w2, #1
         lsrv    x11, x8, x11
-        bfxil   w10, w11, #X86_EFL_CF_BIT, #1
+        bfxil   w0, w11, #X86_EFL_CF_BIT, #1

 .ifne \a_fIntelFlags
         mov     w11, ~(X86_EFL_AF | X86_EFL_OF)
-        and     w10, w10, w11       /* AF and OF are cleared */
-.else
-        orr     w10, w10, X86_EFL_AF        /* AF is set */
-        and     w10, w10, ~X86_EFL_OF       /* OF is cleared */
-.endif
-
-        CALC_EFLAGS_PARITY w10, w9, w11
+        and     w0, w0, w11         /* AF and OF are cleared */
+.else
+        orr     w0, w0, X86_EFL_AF          /* AF is set */
+        and     w0, w0, ~X86_EFL_OF         /* OF is cleared */
+.endif
+
+        CALC_EFLAGS_PARITY w0, w9, w11

         ands    xzr, x9, x9         /* Sets NZ */
         mrs     x11, NZCV
         lsr     w11, w11, #30       /* N=1; Z=0 */
-        bfi     w10, w11, X86_EFL_ZF_BIT, 2 /* EFLAGS.ZF and EFLAGS.SF */
-
-        str     w10, [x2]
+        bfi     w0, w11, X86_EFL_ZF_BIT, 2  /* EFLAGS.ZF and EFLAGS.SF */
+
 99:
         ret
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
--- trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm   (r104209)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm   (r104238)

 ; @param        4       Force load flags.
 ;
-; Makes ASSUMPTIONS about A0, A1 and A2 assignments.
+; Makes ASSUMPTIONS about A0, A1 and A2 assignments.  Specifically, that with
+; GCC/64 we're free to use RCX/CL as it isn't used for any arguments.  While
+; MSC/64 & 32-bit fastcall are using ECX for the first argument (fEFlagsIn),
+; so we have to switch it around with the shift count parameter registers.
 ;
 ; @note the _intel and _amd variants are implemented in C.
…
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12
         PROLOGUE_3_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD    A2, %2, %3, %4
  %ifdef ASM_CALL64_GCC
-        mov     cl, A1_8
-        %1      byte [A0], cl
+        IEM_MAYBE_LOAD_FLAGS        A0_32, %2, %3, %4
+        mov     cl, A2_8
+        %1      byte [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A0_32, %2, %3, 0
  %else
-        xchg    A1, A0
+        xchg    A2, A0
+        IEM_MAYBE_LOAD_FLAGS        A2_32, %2, %3, %4
         %1      byte [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A2_32, %2, %3, 0
  %endif
-        IEM_SAVE_FLAGS_OLD          A2, %2, %3, 0
 .zero_shift:
         EPILOGUE_3_ARGS
…
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12
         PROLOGUE_3_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD    A2, %2, %3, %4
  %ifdef ASM_CALL64_GCC
-        mov     cl, A1_8
-        %1      word [A0], cl
+        IEM_MAYBE_LOAD_FLAGS        A0_32, %2, %3, %4
+        mov     cl, A2_8
+        %1      word [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A0_32, %2, %3, 0
  %else
-        xchg    A1, A0
+        xchg    A2, A0
+        IEM_MAYBE_LOAD_FLAGS        A2_32, %2, %3, %4
         %1      word [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A2_32, %2, %3, 0
  %endif
-        IEM_SAVE_FLAGS_OLD          A2, %2, %3, 0
         EPILOGUE_3_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u16
…
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12
         PROLOGUE_3_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD    A2, %2, %3, %4
  %ifdef ASM_CALL64_GCC
-        mov     cl, A1_8
-        %1      dword [A0], cl
+        IEM_MAYBE_LOAD_FLAGS        A0_32, %2, %3, %4
+        mov     cl, A2_8
+        %1      dword [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A0_32, %2, %3, 0
  %else
-        xchg    A1, A0
+        xchg    A2, A0
+        IEM_MAYBE_LOAD_FLAGS        A2_32, %2, %3, %4
         %1      dword [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A2_32, %2, %3, 0
  %endif
-        IEM_SAVE_FLAGS_OLD          A2, %2, %3, 0
         EPILOGUE_3_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u32
…
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
         PROLOGUE_3_ARGS
-        IEM_MAYBE_LOAD_FLAGS_OLD    A2, %2, %3, %4
  %ifdef ASM_CALL64_GCC
-        mov     cl, A1_8
-        %1      qword [A0], cl
+        IEM_MAYBE_LOAD_FLAGS        A0_32, %2, %3, %4
+        mov     cl, A2_8
+        %1      qword [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A0_32, %2, %3, 0
  %else
-        xchg    A1, A0
+        xchg    A2, A0
+        IEM_MAYBE_LOAD_FLAGS        A2_32, %2, %3, %4
         %1      qword [A1], cl
+        IEM_SAVE_FLAGS_RETVAL       A2_32, %2, %3, 0
  %endif
-        IEM_SAVE_FLAGS_OLD          A2, %2, %3, 0
         EPILOGUE_3_ARGS
 ENDPROC iemAImpl_ %+ %1 %+ _u64
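The new comment block spells out the register juggling: the x86 shift instructions insist on taking their count in CL, which is free under the SysV AMD64 convention (GCC) but occupied by the first argument (fEFlagsIn) under MSC/64 and 32-bit fastcall, hence the xchg in the non-GCC branch. A C-level sketch of where the three arguments land (the register annotations are illustrative, derived from the comment above, not from the changeset itself):

#include <stdint.h>

uint32_t iemAImpl_shl_u16(uint32_t  fEFlagsIn, /* MSC/64: ecx,  SysV/64: edi */
                          uint16_t *pu16Dst,   /* MSC/64: rdx,  SysV/64: rsi */
                          uint8_t   cShift);   /* MSC/64: r8b,  SysV/64: dl  */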
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
--- trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp  (r104209)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp  (r104238)

  */
 #define EMIT_ROL(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags, a_fnHlp) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_rol_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_rol_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
            it the same way as for 1 bit shifts. */ \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl = *pfEFlags; \
-        fEfl &= ~(X86_EFL_CF | X86_EFL_OF); \
+        fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
         uint32_t const fCarry = (uResult & X86_EFL_CF); \
-        fEfl |= fCarry; \
+        fEFlags |= fCarry; \
         if (!a_fIntelFlags) /* AMD 3990X: According to the last sub-shift: */ \
-            fEfl |= ((uResult >> (a_cBitsWidth - 1)) ^ fCarry) << X86_EFL_OF_BIT; \
+            fEFlags |= ((uResult >> (a_cBitsWidth - 1)) ^ fCarry) << X86_EFL_OF_BIT; \
         else /* Intel 10980XE: According to the first sub-shift: */ \
-            fEfl |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1)); \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1)); \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_ROR(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags, a_fnHlp) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_ror_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_ror_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
        /* Calc EFLAGS: */ \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl = *pfEFlags; \
-        fEfl &= ~(X86_EFL_CF | X86_EFL_OF); \
+        fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
         uint32_t const fCarry = (uResult >> ((a_cBitsWidth) - 1)) & X86_EFL_CF; \
-        fEfl |= fCarry; \
+        fEFlags |= fCarry; \
         if (!a_fIntelFlags) /* AMD 3990X: According to the last sub-shift: */ \
-            fEfl |= (((uResult >> ((a_cBitsWidth) - 2)) ^ fCarry) & 1) << X86_EFL_OF_BIT; \
+            fEFlags |= (((uResult >> ((a_cBitsWidth) - 2)) ^ fCarry) & 1) << X86_EFL_OF_BIT; \
         else /* Intel 10980XE: According to the first sub-shift: */ \
-            fEfl |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << (a_cBitsWidth - 1))); \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << (a_cBitsWidth - 1))); \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_RCL(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_rcl_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_rcl_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
 \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl     = *pfEFlags; \
-        uint32_t fInCarry = fEfl & X86_EFL_CF; \
+        uint32_t fInCarry = fEFlags & X86_EFL_CF; \
         uResult |= (a_uType)fInCarry << (cShift - 1); \
 \
…
 \
        /* Calc EFLAGS. */ \
-        fEfl &= ~(X86_EFL_CF | X86_EFL_OF); \
+        fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
         uint32_t const fOutCarry = a_cBitsWidth >= 32 || a_fIntelFlags || cShift \
                                  ? (uDst >> (a_cBitsWidth - cShift)) & X86_EFL_CF : fInCarry; \
-        fEfl |= fOutCarry; \
+        fEFlags |= fOutCarry; \
         if (!a_fIntelFlags) /* AMD 3990X: According to the last sub-shift: */ \
-            fEfl |= ((uResult >> (a_cBitsWidth - 1)) ^ fOutCarry) << X86_EFL_OF_BIT; \
+            fEFlags |= ((uResult >> (a_cBitsWidth - 1)) ^ fOutCarry) << X86_EFL_OF_BIT; \
         else /* Intel 10980XE: According to the first sub-shift: */ \
-            fEfl |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1)); \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1)); \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_RCR(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_rcr_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_rcr_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
 \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl     = *pfEFlags; \
-        uint32_t fInCarry = fEfl & X86_EFL_CF; \
+        uint32_t fInCarry = fEFlags & X86_EFL_CF; \
         uResult |= (a_uType)fInCarry << (a_cBitsWidth - cShift); \
         *puDst = uResult; \
 \
        /* Calc EFLAGS.  The OF bit is undefined if cShift > 1, we implement \
           it the same way as for 1 bit shifts. */ \
-        fEfl &= ~(X86_EFL_CF | X86_EFL_OF); \
+        fEFlags &= ~(X86_EFL_CF | X86_EFL_OF); \
         uint32_t const fOutCarry = a_cBitsWidth >= 32 || a_fIntelFlags || cShift \
                                  ? (uDst >> (cShift - 1)) & X86_EFL_CF : fInCarry; \
-        fEfl |= fOutCarry; \
+        fEFlags |= fOutCarry; \
         if (!a_fIntelFlags) /* AMD 3990X: XOR two most signficant bits of the result: */ \
-            fEfl |= X86_EFL_GET_OF_ ## a_cBitsWidth(uResult ^ (uResult << 1)); \
+            fEFlags |= X86_EFL_GET_OF_ ## a_cBitsWidth(uResult ^ (uResult << 1)); \
         else /* Intel 10980XE: same as AMD, but only for the first sub-shift: */ \
-            fEfl |= (fInCarry ^ (uint32_t)(uDst >> (a_cBitsWidth - 1))) << X86_EFL_OF_BIT; \
-        *pfEFlags = fEfl; \
+            fEFlags |= (fInCarry ^ (uint32_t)(uDst >> (a_cBitsWidth - 1))) << X86_EFL_OF_BIT; \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_SHL(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_shl_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_shl_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
        /* Calc EFLAGS. */ \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl   = *pfEFlags & ~X86_EFL_STATUS_BITS; \
-        uint32_t fCarry = (uDst >> (a_cBitsWidth - cShift)) & X86_EFL_CF; \
-        fEfl |= fCarry; \
+        fEFlags &= ~X86_EFL_STATUS_BITS; \
+        uint32_t const fCarry = (uDst >> (a_cBitsWidth - cShift)) & X86_EFL_CF; \
+        fEFlags |= fCarry; \
         if (!a_fIntelFlags) \
-            fEfl |= ((uResult >> (a_cBitsWidth - 1)) ^ fCarry) << X86_EFL_OF_BIT; /* AMD 3990X: Last shift result. */ \
+            fEFlags |= ((uResult >> (a_cBitsWidth - 1)) ^ fCarry) << X86_EFL_OF_BIT; /* AMD 3990X: Last shift result. */ \
         else \
-            fEfl |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1));   /* Intel 10980XE: First shift result. */ \
-        fEfl |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
-        fEfl |= X86_EFL_CALC_ZF(uResult); \
-        fEfl |= IEM_EFL_CALC_PARITY(uResult); \
+            fEFlags |= X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << 1)); /* Intel 10980XE: First shift result. */ \
+        fEFlags |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
+        fEFlags |= X86_EFL_CALC_ZF(uResult); \
+        fEFlags |= IEM_EFL_CALC_PARITY(uResult); \
         if (!a_fIntelFlags) \
-            fEfl |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_SHR(a_cBitsWidth, a_uType, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_shr_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_shr_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
        /* Calc EFLAGS. */ \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; \
-        fEfl |= (uDst >> (cShift - 1)) & X86_EFL_CF; \
+        fEFlags &= ~X86_EFL_STATUS_BITS; \
+        fEFlags |= (uDst >> (cShift - 1)) & X86_EFL_CF; \
         if (a_fIntelFlags || cShift == 1) /* AMD 3990x does what intel documents; Intel 10980XE does this for all shift counts. */ \
-            fEfl |= (uDst >> (a_cBitsWidth - 1)) << X86_EFL_OF_BIT; \
-        fEfl |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
-        fEfl |= X86_EFL_CALC_ZF(uResult); \
-        fEfl |= IEM_EFL_CALC_PARITY(uResult); \
+            fEFlags |= (uDst >> (a_cBitsWidth - 1)) << X86_EFL_OF_BIT; \
+        fEFlags |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
+        fEFlags |= X86_EFL_CALC_ZF(uResult); \
+        fEFlags |= IEM_EFL_CALC_PARITY(uResult); \
         if (!a_fIntelFlags) \
-            fEfl |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
     } \
+    return fEFlags; \
 }
…
  */
 #define EMIT_SAR(a_cBitsWidth, a_uType, a_iType, a_Suffix, a_fIntelFlags) \
-IEM_DECL_IMPL_DEF(void,     RT_CONCAT3(iemAImpl_sar_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, uint8_t cShift, uint32_t *pfEFlags)) \
+IEM_DECL_IMPL_DEF(uint32_t, RT_CONCAT3(iemAImpl_sar_u,a_cBitsWidth,a_Suffix),(uint32_t fEFlags, a_uType *puDst, uint8_t cShift)) \
 { \
     cShift &= a_cBitsWidth >= 32 ? a_cBitsWidth - 1 : 31; \
…
           Note! The OF flag is always zero because the result never differs from the input. */ \
         AssertCompile(X86_EFL_CF_BIT == 0); \
-        uint32_t fEfl = *pfEFlags & ~X86_EFL_STATUS_BITS; \
-        fEfl |= (iDst >> (cShift - 1)) & X86_EFL_CF; \
-        fEfl |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
-        fEfl |= X86_EFL_CALC_ZF(uResult); \
-        fEfl |= IEM_EFL_CALC_PARITY(uResult); \
+        fEFlags &= ~X86_EFL_STATUS_BITS; \
+        fEFlags |= (iDst >> (cShift - 1)) & X86_EFL_CF; \
+        fEFlags |= X86_EFL_CALC_SF(uResult, a_cBitsWidth); \
+        fEFlags |= X86_EFL_CALC_ZF(uResult); \
+        fEFlags |= IEM_EFL_CALC_PARITY(uResult); \
         if (!a_fIntelFlags) \
-            fEfl |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
-        *pfEFlags = fEfl; \
+            fEFlags |= X86_EFL_AF; /* AMD 3990x sets it unconditionally, Intel 10980XE does the oposite */ \
     } \
+    return fEFlags; \
 }
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h   (r104208)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h   (r104238)

     IEM_MC_BEGIN(IEM_MC_F_MIN_186, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t *,       pu8Dst,            0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,           2); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,                  1); \
     IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
 \
-    IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
-    IEM_MC_ARG(uint8_t *,   pu8Dst,     0); \
+    IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,                  1); \
     IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
 \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,   2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_MIN_186, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint16_t *,      pu16Dst,           0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,           2); \
+    IEM_MC_ARG(uint16_t *,      pu16Dst,                 1); \
     IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint32_t *,      pu32Dst,           0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,           2); \
+    IEM_MC_ARG(uint32_t *,      pu32Dst,                 1); \
     IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
     IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint64_t *,      pu64Dst,           0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,           2); \
+    IEM_MC_ARG(uint64_t *,      pu64Dst,                 1); \
     IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
 \
     IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-    IEM_MC_ARG(uint16_t *,      pu16Dst,           0); \
+    IEM_MC_ARG(uint16_t *,      pu16Dst,                 1); \
     IEM_MC_MEM_MAP_U16_RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
 \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,   2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
 \
     IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-    IEM_MC_ARG(uint32_t *,      pu32Dst,           0); \
+    IEM_MC_ARG(uint32_t *,      pu32Dst,                 1); \
     IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
 \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,   2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
 \
     IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-    IEM_MC_ARG(uint64_t *,      pu64Dst,           0); \
+    IEM_MC_ARG(uint64_t *,      pu64Dst,                 1); \
     IEM_MC_MEM_MAP_U64_RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
 \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, cShift, 1); \
-    IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,   2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,               0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg, /*=*/ cShift, 2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(0, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t *,       pu8Dst,             0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,            2); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,             1); \
     IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
    /* memory */ \
     IEM_MC_BEGIN(0, 0); \
-    IEM_MC_ARG(uint8_t *,       pu8Dst,             0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1); \
-    IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
-    IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-    \
+    IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+    \
+    IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,             1); \
     IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(0, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint16_t *,      pu16Dst,            0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,            2); \
+    IEM_MC_ARG(uint16_t *,      pu16Dst,            1); \
     IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint32_t *,      pu32Dst,            0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,            2); \
+    IEM_MC_ARG(uint32_t *,      pu32Dst,            1); \
     IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
     IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint64_t *,      pu64Dst,            0); \
-    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,            2); \
+    IEM_MC_ARG(uint64_t *,      pu64Dst,            1); \
     IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+    IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     case IEMMODE_16BIT: \
         IEM_MC_BEGIN(0, 0); \
-        IEM_MC_ARG(uint16_t *,      pu16Dst,            0); \
-        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-        \
+        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_ARG(uint16_t *,      pu16Dst,            1); \
+        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
         IEM_MC_MEM_MAP_U16_RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+        \
+        IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
…
     case IEMMODE_32BIT: \
         IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
-        IEM_MC_ARG(uint32_t *,      pu32Dst,            0); \
-        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-        \
+        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
+        IEM_MC_ARG(uint32_t *,      pu32Dst,            1); \
         IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+        \
+        IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
…
     case IEMMODE_64BIT: \
         IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
-        IEM_MC_ARG(uint64_t *,      pu64Dst,            0); \
-        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1,  1); \
-        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-        \
+        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
+        IEM_MC_ARG(uint64_t *,      pu64Dst,            1); \
         IEM_MC_MEM_MAP_U64_RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+        \
+        IEM_MC_ARG_EFLAGS(          fEFlagsIn,          0); \
+        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/ 1,  2); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
…
     IEM_MC_BEGIN(0, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t *,       pu8Dst,     0); \
-    IEM_MC_ARG(uint8_t,         cShiftArg,  1); \
-    IEM_MC_ARG(uint32_t *,      pEFlags,    2); \
+    IEM_MC_ARG(uint8_t,         cShiftArg,  2); \
     IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,     1); \
     IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-    IEM_MC_REF_EFLAGS(pEFlags); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,  0); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
    /* memory */ \
     IEM_MC_BEGIN(0, 0); \
-    IEM_MC_ARG(uint8_t *,       pu8Dst,     0); \
-    IEM_MC_ARG(uint8_t,         cShiftArg,  1); \
     IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst); \
-    IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
-    \
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+    \
+    IEM_MC_LOCAL(uint8_t,       bUnmapInfo); \
+    IEM_MC_ARG(uint8_t *,       pu8Dst,     1); \
+    IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+    \
+    IEM_MC_ARG(uint8_t,         cShiftArg,  2); \
     IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-    IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \
-    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags); \
+    \
+    IEM_MC_ARG_EFLAGS(          fEFlagsIn,  0); \
+    IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU8, fEFlagsIn, pu8Dst, cShiftArg); \
 \
     IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-    IEM_MC_COMMIT_EFLAGS(EFlags); \
+    IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
     IEM_MC_END(); \
…
     IEM_MC_BEGIN(0, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t,         cShiftArg,  1); \
+    IEM_MC_ARG(uint8_t,         cShiftArg,  2); \
     IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
         IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
…
     } IEM_MC_NATIVE_ELSE() { \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_ARG(uint16_t *,  pu16Dst,    0); \
+        IEM_MC_ARG(uint16_t *,  pu16Dst,    1); \
         IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-        IEM_MC_ARG(uint32_t *,  pEFlags,    2); \
-        IEM_MC_REF_EFLAGS(pEFlags); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
…
     IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t,         cShiftArg,  1); \
+    IEM_MC_ARG(uint8_t,         cShiftArg,  2); \
     IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
         IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
…
     } IEM_MC_NATIVE_ELSE() { \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_ARG(uint32_t *,  pu32Dst,    0); \
+        IEM_MC_ARG(uint32_t *,  pu32Dst,    1); \
         IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-        IEM_MC_ARG(uint32_t *,  pEFlags,    2); \
-        IEM_MC_REF_EFLAGS(pEFlags); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
         IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
…
     IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
-    IEM_MC_ARG(uint8_t,         cShiftArg,  1); \
+    IEM_MC_ARG(uint8_t,         cShiftArg,  2); \
     IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \
         IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(cShiftArg, X86_GREG_xCX); \
…
     } IEM_MC_NATIVE_ELSE() { \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_ARG(uint64_t *,  pu64Dst,    0); \
+        IEM_MC_ARG(uint64_t *,  pu64Dst,    1); \
         IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
-        IEM_MC_ARG(uint32_t *,  pEFlags,    2); \
-        IEM_MC_REF_EFLAGS(pEFlags); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
     } IEM_MC_NATIVE_ENDIF(); \
     IEM_MC_ADVANCE_RIP_AND_FINISH(); \
…
     case IEMMODE_16BIT: \
         IEM_MC_BEGIN(0, 0); \
-        IEM_MC_ARG(uint16_t *,  pu16Dst,    0); \
-        IEM_MC_ARG(uint8_t,     cShiftArg,  1); \
-        IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
-        \
+        IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst); \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
+        IEM_MC_ARG(uint16_t *,  pu16Dst,    1); \
+        IEM_MC_MEM_MAP_U16_RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+        \
+        IEM_MC_ARG(uint8_t,     cShiftArg,  2); \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_MEM_MAP_U16_RW(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU16, fEFlagsIn, pu16Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
…
     case IEMMODE_32BIT: \
         IEM_MC_BEGIN(IEM_MC_F_MIN_386, 0); \
-        IEM_MC_ARG(uint32_t *,  pu32Dst,    0); \
-        IEM_MC_ARG(uint8_t,     cShiftArg,  1); \
-        IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
-        \
+        IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst); \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
+        IEM_MC_ARG(uint32_t *,  pu32Dst,    1); \
+        IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+        \
+        IEM_MC_ARG(uint8_t,     cShiftArg,  2); \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_MEM_MAP_U32_RW(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU32, fEFlagsIn, pu32Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
…
     case IEMMODE_64BIT: \
         IEM_MC_BEGIN(IEM_MC_F_64BIT, 0); \
-        IEM_MC_ARG(uint64_t *,  pu64Dst,    0); \
-        IEM_MC_ARG(uint8_t,     cShiftArg,  1); \
         IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst); \
-        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
-        \
         IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
+        \
+        IEM_MC_LOCAL(uint8_t,   bUnmapInfo); \
+        IEM_MC_ARG(uint64_t *,  pu64Dst,    1); \
+        IEM_MC_MEM_MAP_U64_RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
+        \
+        IEM_MC_ARG(uint8_t,     cShiftArg,  2); \
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); \
-        IEM_MC_MEM_MAP_U64_RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
-        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2); \
-        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags); \
+        IEM_MC_ARG_EFLAGS(      fEFlagsIn,  0); \
+        IEM_MC_CALL_AIMPL_3(uint32_t, fEFlagsRet, pImpl->pfnNormalU64, fEFlagsIn, pu64Dst, cShiftArg); \
 \
         IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \
-        IEM_MC_COMMIT_EFLAGS(EFlags); \
+        IEM_MC_COMMIT_EFLAGS(fEFlagsRet); \
         IEM_MC_ADVANCE_RIP_AND_FINISH(); \
         IEM_MC_END(); \
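Every rewritten IEM_MC block above follows the same recipe: argument 0 becomes IEM_MC_ARG_EFLAGS(fEFlagsIn, 0), the destination pointer moves to slot 1, the shift count to slot 2, and IEM_MC_CALL_VOID_AIMPL_3 plus IEM_MC_REF_EFLAGS give way to IEM_MC_CALL_AIMPL_3 with an explicit IEM_MC_COMMIT_EFLAGS of the returned value. The shape change in plain C (a sketch; the names are illustrative, not real IEM microcode):

#include <stdint.h>

typedef uint32_t FNSHIFTU16(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift);

static void CallShiftWorker(FNSHIFTU16 *pfnNormalU16, uint32_t *pfCpuEFlags,
                            uint16_t *pu16Dst, uint8_t cShiftArg)
{
    /* old shape: pfnNormalU16(pu16Dst, cShiftArg, pfCpuEFlags);  -- in/out via pointer */
    uint32_t const fEFlagsRet = pfnNormalU16(*pfCpuEFlags, pu16Dst, cShiftArg);
    *pfCpuEFlags = fEFlagsRet;      /* corresponds to IEM_MC_COMMIT_EFLAGS(fEFlagsRet) */
}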
trunk/src/VBox/VMM/include/IEMInternal.h
--- trunk/src/VBox/VMM/include/IEMInternal.h    (r104209)
+++ trunk/src/VBox/VMM/include/IEMInternal.h    (r104238)

 /** @name Shift operations on bytes (Group 2).
  * @{ */
-typedef IEM_DECL_IMPL_TYPE(void,     FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU8,(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift));
 typedef FNIEMAIMPLSHIFTU8  *PFNIEMAIMPLSHIFTU8;
 FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
…
 /** @name Shift operations on words (Group 2).
  * @{ */
-typedef IEM_DECL_IMPL_TYPE(void,     FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU16,(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift));
 typedef FNIEMAIMPLSHIFTU16  *PFNIEMAIMPLSHIFTU16;
 FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
…
 /** @name Shift operations on double words (Group 2).
  * @{ */
-typedef IEM_DECL_IMPL_TYPE(void,     FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU32,(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift));
 typedef FNIEMAIMPLSHIFTU32  *PFNIEMAIMPLSHIFTU32;
 FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
…
 /** @name Shift operations on words (Group 2).
  * @{ */
-typedef IEM_DECL_IMPL_TYPE(void,     FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU64,(uint32_t fEFlagsIn, uint64_t *pu64Dst, uint8_t cShift));
 typedef FNIEMAIMPLSHIFTU64  *PFNIEMAIMPLSHIFTU64;
 FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
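Callers holding one of these function pointers now pass the flags in and take the result back by value. A minimal self-contained sketch with a stand-in worker (MyShlU32 and Dispatch are hypothetical; the real workers are the iemAImpl_* functions declared above):

#include <stdint.h>

typedef uint32_t FNIEMAIMPLSHIFTU32(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift);
typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;

static uint32_t MyShlU32(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift)
{
    *pu32Dst <<= (cShift & 31);
    return fEFlagsIn;               /* flag calculation elided in this stub */
}

static uint32_t Dispatch(PFNIEMAIMPLSHIFTU32 pfn, uint32_t fEFlags, uint32_t *puDst, uint8_t cShift)
{
    return pfn(fEFlags, puDst, cShift);
}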
trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp
--- trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp (r104208)
+++ trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp (r104238)

         a_TestType Test; \
         Test.fEflIn    = RandEFlags(); \
-        Test.fEflOut   = Test.fEflIn; \
         Test.uDstIn    = RandU ## a_cBits ## Dst(iTest); \
         Test.uDstOut   = Test.uDstIn; \
         Test.uSrcIn    = 0; \
         Test.uMisc     = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
-        a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
+        Test.fEflOut   = a_aSubTests[iFn].pfnNative(Test.fEflIn, &Test.uDstOut, Test.uMisc); \
         GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
 \
         Test.fEflIn    = (~Test.fEflIn & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK; \
-        Test.fEflOut   = Test.fEflIn; \
         Test.uDstOut   = Test.uDstIn; \
-        a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
+        Test.fEflOut   = a_aSubTests[iFn].pfnNative(Test.fEflIn, &Test.uDstOut, Test.uMisc); \
         GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
     } \
…
     for (uint32_t i = 0; i < cIterations; i++) \
     { \
-        uint32_t fBenchEfl = fEflIn; \
         a_uType  uBenchDst = uDstIn;  \
-        pfn(&uBenchDst, cShift, &fBenchEfl); \
+        pfn(fEflIn, &uBenchDst, cShift); \
 \
-        fBenchEfl = fEflIn; \
         uBenchDst = uDstIn;  \
-        pfn(&uBenchDst, cShift, &fBenchEfl); \
+        pfn(fEflIn, &uBenchDst, cShift); \
 \
-        fBenchEfl = fEflIn; \
         uBenchDst = uDstIn;  \
-        pfn(&uBenchDst, cShift, &fBenchEfl); \
+        pfn(fEflIn, &uBenchDst, cShift); \
 \
-        fBenchEfl = fEflIn; \
         uBenchDst = uDstIn;  \
-        pfn(&uBenchDst, cShift, &fBenchEfl); \
+        pfn(fEflIn, &uBenchDst, cShift); \
     } \
     return RTTimeNanoTS() - nsStart; \
…
     for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
     { \
-        uint32_t fEfl = paTests[iTest].fEflIn; \
-        a_uType  uDst = paTests[iTest].uDstIn; \
-        pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
-        if (   uDst != paTests[iTest].uDstOut \
-            || fEfl != paTests[iTest].fEflOut ) \
+        a_uType  uDst    = paTests[iTest].uDstIn; \
+        uint32_t fEflOut = pfn(paTests[iTest].fEflIn, &uDst, paTests[iTest].uMisc); \
+        if (   uDst    != paTests[iTest].uDstOut \
+            || fEflOut != paTests[iTest].fEflOut ) \
             RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                          iTest, iVar == 0 ? "" : "/n", \
                          paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
-                         fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
-                         EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
+                         fEflOut, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
+                         EFlagsDiff(fEflOut, paTests[iTest].fEflOut)); \
         else \
         { \
             *g_pu ## a_cBits = paTests[iTest].uDstIn; \
-            *g_pfEfl         = paTests[iTest].fEflIn; \
-            pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
+            fEflOut = pfn(paTests[iTest].fEflIn, g_pu ## a_cBits, paTests[iTest].uMisc); \
             RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
-            RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
+            RTTEST_CHECK(g_hTest, fEflOut == paTests[iTest].fEflOut); \
         } \
     } \
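The updated test loop boils down to one check per vector: both the destination and the returned flags are compared against the recorded outputs. A compact sketch using the test-table field names from the macros above (the struct and function names are illustrative):

#include <stdint.h>

typedef uint32_t FNSHIFTU32(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift);

typedef struct SHIFTTEST32
{
    uint32_t fEflIn, fEflOut;   /* flags before / expected flags after */
    uint32_t uDstIn, uDstOut;   /* destination before / expected after */
    uint8_t  uMisc;             /* the shift count */
} SHIFTTEST32;

static int VerifyOne(FNSHIFTU32 *pfn, SHIFTTEST32 const *pTest)
{
    uint32_t uDst          = pTest->uDstIn;
    uint32_t const fEflOut = pfn(pTest->fEflIn, &uDst, pTest->uMisc);
    return uDst == pTest->uDstOut && fEflOut == pTest->fEflOut;
}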