Changeset 105853 in vbox

- Timestamp: Aug 23, 2024 8:36:08 PM (8 months ago)
- svn:sync-xref-src-repo-rev: 164572
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
--- r105664
+++ r105853
@@ -1873 +1873 @@
         self.asParams = asParams;
         self.oUser    = None;
+
+    def __eq__(self, oOther):
+        if self.sName != oOther.sName:
+            return False;
+        if len(self.asParams) != len(oOther.asParams):
+            return False;
+        for iParam, sMyParam in enumerate(self.asParams):
+            if sMyParam != oOther.asParams[iParam]:
+                return False;
+        return True;
 
     def renderCode(self, cchIndent = 0):
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
--- r105768
+++ r105853
@@ -402 +402 @@
                 aoStmts[iStmt] = oStmt;
 
+                fHadEmptyElseBranch = len(oStmt.aoElseBranch) == 0;
+
                 # Check the two branches for final references. Both branches must
                 # start processing with the same dVars set, fortunately as shallow
                 # copy suffices.
                 dFreedInIfBranch   = self.__analyzeVariableLiveness(oStmt.aoIfBranch, dict(dVars), iDepth + 1);
-                dFreedInElseBranch = self.__analyzeVariableLiveness(oStmt.aoElseBranch,  dVars, iDepth + 1);
+                dFreedInElseBranch = self.__analyzeVariableLiveness(oStmt.aoElseBranch, dVars, iDepth + 1);
 
                 # Add free statements to the start of the IF-branch for variables use
@@ -434 +436 @@
                         oStmt.aoIfBranch.insert(0, oFreeStmt);
                         oStmt.aoElseBranch.insert(0, oFreeStmt);
+
+                #
+                # HACK ALERT!
+                #
+                # This is a bit backwards, but if the else branch was empty, just zap
+                # it so we don't create a bunch of unnecessary jumps as well as a
+                # potential troublesome dirty guest shadowed register flushing for the
+                # if-branch.  The IEM_MC_ENDIF code is forgiving here and will
+                # automatically free the lost variables when merging the states.
+                #
+                # (In fact this behaviour caused trouble if we moved the IEM_MC_FREE_LOCAL
+                # statements ouf of the branches and put them after the IF/ELSE blocks
+                # to try avoid the unnecessary jump troubles, as the variable would be
+                # assigned a host register and thus differ in an incompatible, cause the
+                # endif code to just free the register and variable both, with the result
+                # that the IEM_MC_FREE_LOCAL following the IF/ELSE blocks would assert
+                # since the variable was already freed.)
+                #
+                # See iemNativeRecompFunc_cmovne_Gv_Ev__greg64_nn_64 and
+                # the other cmovcc functions for examples.
+                #
+                if fHadEmptyElseBranch:
+                    oStmt.aoElseBranch = [];
+                #while (    oStmt.aoIfBranch
+                #       and oStmt.aoElseBranch
+                #       and oStmt.aoIfBranch[-1] == oStmt.aoElseBranch[-1]):
+                #    aoStmts.insert(iStmt + 1, oStmt.aoIfBranch[-1]);
+                #    del oStmt.aoIfBranch[-1];
+                #    del oStmt.aoElseBranch[-1];
 
             elif not oStmt.isCppStmt():
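The commented-out while loop above sketches the follow-up the new __eq__ on statements would enable: peeling identical trailing statements out of both branches so they are emitted once after the IF/ELSE instead of twice inside it. A minimal stand-alone illustration of that tail-merging idea — written in C++ rather than the generator's Python, with strings standing in for statement objects (illustrative only, not VBox code):

    #include <string>
    #include <vector>
    #include <iostream>

    /* Peel identical trailing statements off both branches and return them so
       the caller can emit them once after the IF/ELSE block.  The std::string
       elements stand in for statement objects compared via __eq__. */
    static std::vector<std::string> mergeTails(std::vector<std::string> &ifBranch,
                                               std::vector<std::string> &elseBranch)
    {
        std::vector<std::string> tail;
        while (!ifBranch.empty() && !elseBranch.empty() && ifBranch.back() == elseBranch.back())
        {
            tail.insert(tail.begin(), ifBranch.back()); /* keep original statement order */
            ifBranch.pop_back();
            elseBranch.pop_back();
        }
        return tail;
    }

    int main()
    {
        std::vector<std::string> ifB   = { "calc", "store", "free-local" };
        std::vector<std::string> elseB = { "other", "free-local" };
        for (const std::string &s : mergeTails(ifB, elseB))
            std::cout << s << '\n';                     /* prints: free-local */
        return 0;
    }

The changeset deliberately leaves this disabled: as the HACK ALERT comment explains, moving IEM_MC_FREE_LOCAL statements out of the branches made the variable's host-register assignment differ between branches and tripped an assertion, so only the empty-else zap is active.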
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
--- r105818
+++ r105853
@@ -89 +89 @@
 
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
+
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+/**
+ * Updates IEMCPU::uPcUpdatingDebug.
+ */
+DECL_INLINE_THROW(uint32_t) iemNativeEmitPcDebugAdd(PIEMRECOMPILERSTATE pReNative, uint32_t off, int64_t offDisp, uint8_t cBits)
+{
+#  ifdef RT_ARCH_AMD64
+    if (pReNative->Core.fDebugPcInitialized && cBits >= 32)
+    {
+        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
+        if ((int32_t)offDisp == offDisp || cBits != 64)
+        {
+            /* add [q]word [pVCpu->iem.s.uPcUpdatingDebug], imm32/imm8 */
+            if (cBits == 64)
+                pCodeBuf[off++] = X86_OP_REX_W;
+            pCodeBuf[off++] = (int8_t)offDisp == offDisp ? 0x83 : 0x81;
+            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+            if ((int8_t)offDisp == offDisp)
+                pCodeBuf[off++] = (int8_t)offDisp;
+            else
+            {
+                *(int32_t *)&pCodeBuf[off] = (int32_t)offDisp;
+                off += sizeof(int32_t);
+            }
+        }
+        else
+        {
+            /* mov tmp0, imm64 */
+            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, offDisp);
+
+            /* add [pVCpu->iem.s.uPcUpdatingDebug], tmp0 */
+            if (cBits == 64)
+                pCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 >= 8 ? X86_OP_REX_R : 0);
+            else if (IEMNATIVE_REG_FIXED_TMP0 >= 8)
+                pCodeBuf[off++] = X86_OP_REX_R;
+            pCodeBuf[off++] = 0x01;
+            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0 & 7,
+                                             RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+        }
+        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+        return off;
+    }
+#  endif
+
+    uint8_t const         idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+    PIEMNATIVEINSTR const pCodeBuf  = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 32 : 12);
+
+    if (pReNative->Core.fDebugPcInitialized)
+    {
+        Log4(("uPcUpdatingDebug+=%ld cBits=%d off=%#x\n", offDisp, cBits, off));
+        off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    }
+    else
+    {
+        Log4(("uPcUpdatingDebug=rip+%ld cBits=%d off=%#x\n", offDisp, cBits, off));
+        pReNative->Core.fDebugPcInitialized = true;
+        off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+    }
+
+    if (cBits == 64)
+        off = iemNativeEmitAddGprImmEx(pCodeBuf, off, idxTmpReg, offDisp, IEMNATIVE_REG_FIXED_TMP0);
+    else
+    {
+        off = iemNativeEmitAddGpr32ImmEx(pCodeBuf, off, idxTmpReg, (int32_t)offDisp, IEMNATIVE_REG_FIXED_TMP0);
+        if (cBits == 16)
+            off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxTmpReg, UINT16_MAX);
+    }
+
+    off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug),
+                                           IEMNATIVE_REG_FIXED_TMP0);
+
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    iemNativeRegFreeTmp(pReNative, idxTmpReg);
+    return off;
+}
+
+
+# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
 DECL_INLINE_THROW(uint32_t) iemNativePcAdjustCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
@@ -99 +177 @@
 }
 # endif
+
 #endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */
 
@@ -391 +470 @@
  * can disregard its state when we hit the IEM_MC_ENDIF.
  */
-    uint8_t idxCondDepth = pReNative->cCondDepth;
-    if (idxCondDepth)
-    {
-        idxCondDepth--;
-        if (pReNative->aCondStack[idxCondDepth].fInElse)
-            pReNative->aCondStack[idxCondDepth].fElseExitTb = true;
-        else
-            pReNative->aCondStack[idxCondDepth].fIfExitTb   = true;
-    }
+    iemNativeMarkCurCondBranchAsExiting(pReNative);
 
     /*
@@ -510 +581 @@
 
     pReNative->Core.offPc += cbInstr;
-# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
+    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 64);
+    off = iemNativeEmitPcDebugCheck(pReNative, off);
+# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
     off = iemNativePcAdjustCheck(pReNative, off);
 # endif
+
     if (pReNative->cCondDepth)
         off = iemNativeEmitPcWriteback(pReNative, off);
     else
         pReNative->Core.cInstrPcUpdateSkipped++;
+
 #endif
 
@@ -535 +612 @@
 DECL_INLINE_THROW(uint32_t)
 iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
+{
+#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
+# ifdef IEMNATIVE_REG_FIXED_PC_DBG
+    if (!pReNative->Core.offPc)
+        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+# endif
+
+    /* Allocate a temporary PC register. */
+    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+
+    /* Perform the addition and store the result. */
+    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+    /* Free but don't flush the PC register. */
+    iemNativeRegFreeTmp(pReNative, idxPcReg);
+#endif
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
+
+    pReNative->Core.offPc += cbInstr;
+    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 32);
+    off = iemNativeEmitPcDebugCheck(pReNative, off);
+# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
+    off = iemNativePcAdjustCheck(pReNative, off);
+# endif
+
+    if (pReNative->cCondDepth)
+        off = iemNativeEmitPcWriteback(pReNative, off);
+    else
+        pReNative->Core.cInstrPcUpdateSkipped++;
+#endif
+
+    return off;
+}
+
+
+#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
+    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
+    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
+
+#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
+    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
+    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
+    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
+
+/** Same as iemRegAddToIp16AndFinishingNoFlags. */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
 {
 #if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
@@ -547 +676 @@
     /* Perform the addition and store the result. */
     off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
+    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
 
@@ -557 +687 @@
 
     pReNative->Core.offPc += cbInstr;
-# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
+    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 16);
+    off = iemNativeEmitPcDebugCheck(pReNative, off);
+# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
     off = iemNativePcAdjustCheck(pReNative, off);
 # endif
-    if (pReNative->cCondDepth)
-        off = iemNativeEmitPcWriteback(pReNative, off);
-    else
-        pReNative->Core.cInstrPcUpdateSkipped++;
-#endif
-
-    return off;
-}
-
-
-#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
-    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
-    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
-
-#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
-    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
-    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
-    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
-
-/** Same as iemRegAddToIp16AndFinishingNoFlags. */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
-{
-#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
-# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
-    if (!pReNative->Core.offPc)
-        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
-# endif
-
-    /* Allocate a temporary PC register. */
-    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
-
-    /* Perform the addition and store the result. */
-    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
-    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
-    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
-
-    /* Free but don't flush the PC register. */
-    iemNativeRegFreeTmp(pReNative, idxPcReg);
-#endif
-
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
-
-    pReNative->Core.offPc += cbInstr;
-# if defined(IEMNATIVE_REG_FIXED_PC_DBG)
-    off = iemNativePcAdjustCheck(pReNative, off);
-# endif
+
     if (pReNative->cCondDepth)
         off = iemNativeEmitPcWriteback(pReNative, off);
@@ -701 +788 @@
 
     /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
+    /** @todo relax this one, we won't raise \#GP when a_fWithinPage is true. */
     off = iemNativeRegFlushPendingWrites(pReNative, off);
 
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -728 +815 @@
         off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
     }
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, (int64_t)offDisp + cbInstr, enmEffOpSize == IEMMODE_64BIT ? 64 : 16);
+    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+#endif
+
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
 
@@ -819 +911 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -836 +927 @@
     if (!a_fFlat)
         off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, offDisp + cbInstr, enmEffOpSize == IEMMODE_32BIT ? 32 : 16);
+    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+#endif
 
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
@@ -883 +979 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -894 +989 @@
     off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
     off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugAdd(pReNative, off, offDisp + cbInstr, 16);
+    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+#endif
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
 
@@ -991 +1090 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -1009 +1107 @@
     /* Store the result. */
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=rip off=%#x\n", off));
+#endif
 
     iemNativeVarRegisterRelease(pReNative, idxVarPc);
@@ -1401 +1505 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -1458 +1561 @@
     /* Store the result. */
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxNewPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxNewPcReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=rip/indirect-call off=%#x\n", off));
+#endif
 
 #if 1
@@ -1523 +1631 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -1554 +1661 @@
     /* Store the result. */
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=rip/rel-call-16 off=%#x offDisp=%d\n", off, offDisp));
+#endif
 
     /* Need to transfer the shadow information to the new RIP register. */
@@ -1589 +1701 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -1617 +1728 @@
     /* Store the result. */
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=eip/rel-call-32 off=%#x offDisp=%d\n", off, offDisp));
+#endif
 
     /* Need to transfer the shadow information to the new RIP register. */
@@ -1652 +1768 @@
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     Assert(pReNative->Core.offPc == 0);
-
     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
 #endif
@@ -1678 +1793 @@
     /* Store the result. */
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=rip/rel-call-64 off=%#x offDisp=%ld\n", off, offDisp));
+#endif
 
     /* Need to transfer the shadow information to the new RIP register. */
@@ -1702 +1822 @@
     off = iemNativeEmitAddGpr16ImmEx(pCodeBuf, off, idxRegRsp, cbPopAdd); /* ASSUMES this does NOT modify bits [63:16]! */
     RT_NOREF(idxRegTmp);
+
 #elif defined(RT_ARCH_ARM64)
     /* ubfiz regeff, regrsp, #0, #16 - copies bits 15:0 from RSP to EffSp bits 15:0, zeroing bits 63:16. */
@@ -1716 +1837 @@
     /* bfi regrsp, regeff, #0, #16 - moves bits 15:0 from tmp to RSP bits 15:0, keeping the other RSP bits as is. */
     pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegRsp, idxRegTmp, 0, 16, false /*f64Bit*/);
+
 #else
 # error "Port me"
@@ -1998 +2120 @@
 
     /* Commit the result and clear any current guest shadows for RIP. */
-    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rsp));
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp,       RT_UOFFSETOF(VMCPU, cpum.GstCtx.rsp));
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
-    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult, kIemNativeGstReg_Pc, off);
+    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult,    kIemNativeGstReg_Pc, off);
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    pReNative->Core.fDebugPcInitialized = true;
+    Log4(("uPcUpdatingDebug=rip/ret off=%#x\n", off));
+#endif
 
     /* Need to transfer the shadowing information to the host register containing the updated value now. */
@@ -2464 +2591 @@
  * (too many nestings)
  */
-DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative, uint32_t *poff)
-{
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    *poff = iemNativeRegFlushPendingWrites(pReNative, *poff);
-#endif
-
+DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative)
+{
     uint32_t const idxStack = pReNative->cCondDepth;
     AssertStmt(idxStack < RT_ELEMENTS(pReNative->aCondStack), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_TOO_DEEPLY_NESTED));
@@ -2508 +2631 @@
 #endif
 
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    Assert(pReNative->Core.offPc == 0);
-#endif
-
     /* Copy the initial state so we can restore it in the 'else' block. */
     pEntry->InitialState = pReNative->Core;
@@ -2530 +2649 @@
     Assert(!pEntry->fInElse);
 
-#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-    /* Writeback any dirty shadow registers. */
-    /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
-     *        in one of the branches and leave guest registers already dirty before the start of the if
-     *        block alone. */
-    off = iemNativeRegFlushDirtyGuest(pReNative, off);
-#endif
-
-    /* Jump to the endif */
-    off = iemNativeEmitJmpToLabel(pReNative, off, pEntry->idxLabelEndIf);
+    /* We can skip dirty register flushing and the dirty register flushing if
+       the branch already jumped to a TB exit. */
+    if (!pEntry->fIfExitTb)
+    {
+#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) && 0
+        /* Writeback any dirty shadow registers. */
+        /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
+         *        in one of the branches and leave guest registers already dirty before the start of the if
+         *        block alone. */
+        off = iemNativeRegFlushDirtyGuest(pReNative, off);
+#endif
+
+        /* Jump to the endif. */
+        off = iemNativeEmitJmpToLabel(pReNative, off, pEntry->idxLabelEndIf);
+    }
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+    else
+        Assert(pReNative->Core.offPc == 0);
+# endif
 
     /* Define the else label and enter the else part of the condition. */
     iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
     pEntry->fInElse = true;
-
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    Assert(pReNative->Core.offPc == 0);
-#endif
 
     /* Snapshot the core state so we can do a merge at the endif and restore
@@ -2568 +2692 @@
     Assert(pReNative->cCondDepth > 0 && pReNative->cCondDepth <= RT_ELEMENTS(pReNative->aCondStack));
     PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth - 1];
-#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
-    Assert(pReNative->Core.offPc == 0);
+
+#if defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) && 0
+    off = iemNativeRegFlushDirtyGuest(pReNative, off);
 #endif
 
@@ -2576 +2701 @@
      * other branch and skip all the merging headache.
      */
+    bool fDefinedLabels = false;
     if (pEntry->fElseExitTb || pEntry->fIfExitTb)
     {
@@ -2600 +2726 @@
     else
     {
-#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-        /* Writeback any dirty shadow registers (else branch). */
-        /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
-         *        in one of the branches and leave guest registers already dirty before the start of the if
-         *        block alone. */
-        off = iemNativeRegFlushDirtyGuest(pReNative, off);
-#endif
-
         /*
          * Now we have find common group with the core state at the end of the
@@ -2617 +2735 @@
          * But we'd need more info about future for that to be worth the effort. */
         PCIEMNATIVECORESTATE const pOther = pEntry->fInElse ? &pEntry->IfFinalState : &pEntry->InitialState;
-#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-        Assert(   pOther->bmGstRegShadowDirty == 0
-               && pReNative->Core.bmGstRegShadowDirty == 0);
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+        AssertMsgStmt(pReNative->Core.offPc == pOther->offPc,
+                      ("Core.offPc=%#x pOther->offPc=%#x\n", pReNative->Core.offPc, pOther->offPc),
+                      IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED));
 #endif
 
         if (memcmp(&pReNative->Core, pOther, sizeof(*pOther)) != 0)
         {
-            /* shadow guest stuff first. */
+#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+            /*
+             * If the branch has differences in dirty shadow registers, we will flush
+             * the register only dirty in the current branch and dirty any that's only
+             * dirty in the other one.
+             */
+            uint64_t const fGstRegDirtyOther = pOther->bmGstRegShadowDirty;
+            uint64_t const fGstRegDirtyThis  = pReNative->Core.bmGstRegShadowDirty;
+            uint64_t const fGstRegDirtyDiff  = fGstRegDirtyOther ^ fGstRegDirtyThis;
+            uint64_t const fGstRegDirtyHead  = fGstRegDirtyThis  & fGstRegDirtyDiff;
+            uint64_t       fGstRegDirtyTail  = fGstRegDirtyOther & fGstRegDirtyDiff;
+            if (!fGstRegDirtyDiff)
+            { /* likely */ }
+            else
+            {
+                //uint64_t const fGstRegDirtyHead = pReNative->Core.bmGstRegShadowDirty & fGstRegDirtyDiff;
+                if (fGstRegDirtyHead)
+                {
+                    Log12(("iemNativeEmitEndIf: flushing dirty guest registers in current branch: %RX64\n", fGstRegDirtyHead));
+                    off = iemNativeRegFlushDirtyGuest(pReNative, off, fGstRegDirtyHead);
+                }
+            }
+#endif
+
+            /*
+             * Shadowed guest registers.
+             *
+             * We drop any shadows where the two states disagree about where
+             * things are kept.  We may end up flushing dirty more registers
+             * here, if the two branches keeps things in different registers.
+             */
             uint64_t fGstRegs = pReNative->Core.bmGstRegShadows;
             if (fGstRegs)
@@ -2634 +2783 @@
                     fGstRegs &= ~RT_BIT_64(idxGstReg);
 
-                    uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
-                    if (   !(pOther->bmGstRegShadows & RT_BIT_64(idxGstReg))
-                        || idxHstReg != pOther->aidxGstRegShadows[idxGstReg])
+                    uint8_t const idxCurHstReg   = pReNative->Core.aidxGstRegShadows[idxGstReg];
+                    uint8_t const idxOtherHstReg = pOther->aidxGstRegShadows[idxGstReg];
+                    if (   idxCurHstReg != idxOtherHstReg
+                        || !(pOther->bmGstRegShadows & RT_BIT_64(idxGstReg)))
                     {
-                        Log12(("iemNativeEmitEndIf: dropping gst %s from hst %s\n",
-                               g_aGstShadowInfo[idxGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
-
-#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-                        /* Writeback any dirty shadow registers we are about to unshadow. */
-                        off = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, off, idxHstReg);
-#endif
-                        iemNativeRegClearGstRegShadowing(pReNative, idxHstReg, off);
+#ifndef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+                        Log12(("iemNativeEmitEndIf: dropping gst %s (%d) from hst %s (other %d/%#RX64)\n",
+                               g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, g_apszIemNativeHstRegNames[idxCurHstReg],
+                               idxOtherHstReg, pOther->bmGstRegShadows));
+#else
+                        Log12(("iemNativeEmitEndIf: dropping %s gst %s (%d) from hst %s (other %d/%#RX64/%s)\n",
+                               pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg) ? "_dirty_" : "clean",
+                               g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, g_apszIemNativeHstRegNames[idxCurHstReg],
+                               idxOtherHstReg, pOther->bmGstRegShadows,
+                               pOther->bmGstRegShadowDirty & RT_BIT_64(idxGstReg) ? "dirty" : "clean"));
+                        if (pOther->bmGstRegShadowDirty & RT_BIT_64(idxGstReg))
+                            fGstRegDirtyTail |= RT_BIT_64(idxGstReg);
+                        if (pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg))
+                            off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
+#endif
+                        iemNativeRegClearGstRegShadowingOne(pReNative, idxCurHstReg, (IEMNATIVEGSTREG)idxGstReg, off);
                     }
                 } while (fGstRegs);
             }
             else
-            {
                 Assert(pReNative->Core.bmHstRegsWithGstShadow == 0);
-#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-                Assert(pReNative->Core.bmGstRegShadowDirty == 0);
-#endif
-            }
+
+#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+            /*
+             * Generate jumpy code for flushing dirty registers from the other
+             * branch that aren't dirty in the current one.
+             */
+            if (!fGstRegDirtyTail)
+            { /* likely */ }
+            else
+            {
+                STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeEndIfOtherBranchDirty);
+                Log12(("iemNativeEmitEndIf: Dirty register only in the other branch: %RX64 - BAD!\n", fGstRegDirtyTail));
+
+                /* First the current branch has to jump over the dirty flushing from the other branch. */
+                uint32_t const offFixup1 = off;
+                off = iemNativeEmitJmpToFixed(pReNative, off, off + 10);
+
+                /* Put the endif and maybe else label here so the other branch ends up here. */
+                if (!pEntry->fInElse)
+                    iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
+                else
+                    Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
+                iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
+                fDefinedLabels = true;
+
+                /* Flush the dirty guest registers from the other branch. */
+                while (fGstRegDirtyTail)
+                {
+                    unsigned idxGstReg = ASMBitFirstSetU64(fGstRegDirtyTail) - 1;
+                    fGstRegDirtyTail &= ~RT_BIT_64(idxGstReg);
+                    Log12(("iemNativeEmitEndIf: tail flushing %s (%d) from other branch %d (cur %d/%#RX64)\n",
+                           g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, pOther->aidxGstRegShadows[idxGstReg],
+                           pReNative->Core.aidxGstRegShadows[idxGstReg], pReNative->Core.bmGstRegShadows));
+
+                    off = iemNativeRegFlushPendingWriteEx(pReNative, off, (PIEMNATIVECORESTATE)pOther, (IEMNATIVEGSTREG)idxGstReg);
+
+                    /* Mismatching shadowing should've been dropped in the previous step already. */
+                    Assert(   !(pReNative->Core.bmGstRegShadows & RT_BIT_64(idxGstReg))
+                           || pReNative->Core.aidxGstRegShadows[idxGstReg] == pOther->aidxGstRegShadows[idxGstReg]);
+                }
+
+                /* Here is the actual endif label, fixup the above jump to land here. */
+                iemNativeFixupFixedJump(pReNative, offFixup1, off);
+            }
+#endif
 
-            /* Check variables next. For now we must require them to be identical
-               or stuff we can recreate. */
+            /*
+             * Check variables next. For now we must require them to be identical
+             * or stuff we can recreate. (No code is emitted here.)
+             */
             Assert(pReNative->Core.u64ArgVars == pOther->u64ArgVars);
-            uint32_t fVars = pReNative->Core.bmVars | pOther->bmVars;
+#ifdef VBOX_STRICT
+            uint32_t const offAssert = off;
+#endif
+            uint32_t       fVars = pReNative->Core.bmVars | pOther->bmVars;
             if (fVars)
@@ -2702 +2905 @@
                 } while (fVars);
             }
-
-            /* Finally, check that the host register allocations matches. */
-            AssertMsgStmt(pReNative->Core.bmHstRegs == pOther->bmHstRegs,
+            Assert(off == offAssert);
+
+            /*
+             * Finally, check that the host register allocations matches.
+             */
+            AssertMsgStmt((pReNative->Core.bmHstRegs & (pReNative->Core.bmHstRegs ^ pOther->bmHstRegs)) == 0,
                           ("Core.bmHstRegs=%#x pOther->bmHstRegs=%#x - %#x\n",
                            pReNative->Core.bmHstRegs, pOther->bmHstRegs, pReNative->Core.bmHstRegs ^ pOther->bmHstRegs),
@@ -2714 +2920 @@
      * Define the endif label and maybe the else one if we're still in the 'if' part.
      */
-    if (!pEntry->fInElse)
-        iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
-    else
-        Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
-    iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
+    if (!fDefinedLabels)
+    {
+        if (!pEntry->fInElse)
+            iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
+        else
+            Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
+        iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
+    }
 
     /* Pop the conditional stack.*/
@@ -2735 +2944 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* Get the eflags. */
@@ -2762 +2971 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* Get the eflags. */
@@ -2789 +2998 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* Get the eflags. */
@@ -2819 +3028 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* Get the eflags. */
@@ -2855 +3064 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBit1InEfl | fBit2InEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* Get the eflags. */
@@ -2929 +3138 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl | fBit1InEfl | fBit2InEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* We need an if-block label for the non-inverted variant. */
@@ -3015 +3224 @@
 DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
@@ -3038 +3247 @@
 DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
@@ -3057 +3266 @@
 DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
@@ -3086 +3295 @@
 DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
@@ -3115 +3324 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* We have to load both RCX and EFLAGS before we can start branching,
@@ -3177 +3386 @@
 {
     IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     /* We have to load both RCX and EFLAGS before we can start branching,
@@ -3219 +3428 @@
 iemNativeEmitIfLocalIsZ(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarLocal)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
 
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarLocal);
@@ -3245 +3454 @@
 iemNativeEmitIfGregBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t iBitNo)
 {
-    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
+    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
     Assert(iGReg < 16);
 
@@ -3527 +3736 @@
         fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_EFlags);
     iemNativeRegFlushGuestShadows(pReNative, fGstShwFlush);
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    pReNative->Core.fDebugPcInitialized = false;
+    Log4(("fDebugPcInitialized=false cimpl off=%#x (v1)\n", off));
+#endif
 
     return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
@@ -6703 +6917 @@
      */
     /* Allocate a temporary PC register. */
-    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+    /** @todo r=bird: This would technically need to be done up front as it's a register allocation. */
+    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
+                                                             kIemNativeGstRegUse_ForUpdate);
 
     /* Perform the addition and store the result. */
     off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+# endif
 
     /* Free and flush the PC register. */
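The head/tail bitmask split in the IEM_MC_ENDIF hunk above is the core of the new branch-state reconciliation: registers dirty only in the current branch are flushed inline before the join ("head"), while registers dirty only in the other branch are flushed in a jumped-over tail. A stand-alone sketch of just the mask arithmetic, with made-up register numbers (not VBox code):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        /* Bit n set = guest register n has an unwritten (dirty) shadow copy. */
        uint64_t const fDirtyThis  = UINT64_C(0x0006); /* current branch: regs 1,2 dirty */
        uint64_t const fDirtyOther = UINT64_C(0x000c); /* other branch:   regs 2,3 dirty */

        uint64_t const fDiff = fDirtyThis ^ fDirtyOther; /* dirty in exactly one branch */
        uint64_t const fHead = fDirtyThis  & fDiff;      /* flush inline, before the join */
        uint64_t const fTail = fDirtyOther & fDiff;      /* flush in code only the other branch reaches */

        printf("head=%#llx tail=%#llx\n",
               (unsigned long long)fHead, (unsigned long long)fTail); /* head=0x2 tail=0x8 */
        return 0;
    }

Registers dirty in both branches (register 2 here) are in the same state on either path, so they can stay dirty across the join and need no code at all; that is why only the XOR difference matters.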
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
--- r105739
+++ r105853
@@ -8 +8 @@
  *      - Level 2  (Log2) : Details calls as they're recompiled.
  *      - Level 3  (Log3) : Disassemble native code after recompiling.
- *      - Level 4  (Log4) : ...
+ *      - Level 4  (Log4) : Delayed PC updating.
  *      - Level 5  (Log5) : ...
  *      - Level 6  (Log6) : ...
@@ -2072 +2072 @@
     pReNative->Core.offPc = 0;
     pReNative->Core.cInstrPcUpdateSkipped = 0;
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    pReNative->Core.fDebugPcInitialized = false;
+# endif
 #endif
 #ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
@@ -2977 +2980 @@
 #endif /* unused */
 
-
 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+
 /**
  * Stores the host reg @a idxHstReg into guest shadow register @a enmGstReg.
@@ -3002 +3005 @@
         case sizeof(uint16_t):
             return iemNativeEmitStoreGprToVCpuU16(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
-#if 0 /* not present in the table. */
+# if 0 /* not present in the table. */
         case sizeof(uint8_t):
             return iemNativeEmitStoreGprToVCpuU8(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
-#endif
+# endif
         default:
@@ -3013 +3016 @@
 
 /**
- * Emits code to flush a pending write of the given guest register if any.
+ * Emits code to flush a pending write of the given guest register,
+ * version with alternative core state.
+ *
+ * @returns New code buffer offset.
+ * @param   pReNative   The native recompile state.
+ * @param   off         Current code buffer position.
+ * @param   pCore       Alternative core state.
+ * @param   enmGstReg   The guest register to flush.
+ */
+DECL_HIDDEN_THROW(uint32_t)
+iemNativeRegFlushPendingWriteEx(PIEMRECOMPILERSTATE pReNative, uint32_t off, PIEMNATIVECORESTATE pCore, IEMNATIVEGSTREG enmGstReg)
+{
+    uint8_t const idxHstReg = pCore->aidxGstRegShadows[enmGstReg];
+
+    Assert(   (   enmGstReg >= kIemNativeGstReg_GprFirst
+               && enmGstReg <= kIemNativeGstReg_GprLast)
+           || enmGstReg == kIemNativeGstReg_MxCsr);
+    Assert(   idxHstReg != UINT8_MAX
+           && pCore->bmGstRegShadowDirty & RT_BIT_64(enmGstReg));
+    Log12(("iemNativeRegFlushPendingWriteEx: Clearing guest register %s shadowed by host %s (off=%#x)\n",
+           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg], off));
+
+    off = iemNativeEmitStoreGprWithGstShadowReg(pReNative, off, enmGstReg, idxHstReg);
+
+    pCore->bmGstRegShadowDirty &= ~RT_BIT_64(enmGstReg);
+    return off;
+}
+
+
+/**
+ * Emits code to flush a pending write of the given guest register.
  *
  * @returns New code buffer offset.
@@ -3091 +3124 @@
     iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, pReNative->Core.bmGstRegShadowDirty & fGstRegShadows);
 # endif
-    /** @todo r=bird: This is a crap way of enumerating a bitmask where we're
-     *        likely to only have a single bit set.  It'll be in the 0..15 range,
-     *        but still it's 15 unnecessary loops for the last guest register. */
-
     uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fGstRegShadows;
     do
@@ -3107 +3136 @@
     return off;
 }
-#endif
+
+#endif /* IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK */
 
@@ -5667 +5697 @@
 }
 
-
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+
+/**
+ * Checks if the value in @a idxPcReg matches IEMCPU::uPcUpdatingDebug.
+ */
+DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheckWithReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxPcReg)
+{
+    Assert(idxPcReg != IEMNATIVE_REG_FIXED_TMP0);
+    Assert(pReNative->Core.fDebugPcInitialized);
+
+    /* cmp [pVCpu->iem.s.uPcUpdatingDebug], pcreg */
+#  ifdef RT_ARCH_AMD64
+    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
+    pCodeBuf[off++] = X86_OP_REX_W | (idxPcReg >= 8 ? X86_OP_REX_R : 0);
+    pCodeBuf[off++] = 0x3b;
+    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, idxPcReg & 7, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+#  else
+    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
+    off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
+    off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, idxPcReg);
+#  endif
+
+    uint32_t offFixup = off;
+    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 1, kIemNativeInstrCond_e);
+    off = iemNativeEmitBrkEx(pCodeBuf, off, UINT32_C(0x2200));
+    iemNativeFixupFixedJump(pReNative, offFixup, off);
+
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    return off;
+}
+
+
+/**
+ * Checks that the current RIP+offPc matches IEMCPU::uPcUpdatingDebug.
+ */
+DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+{
+    if (pReNative->Core.fDebugPcInitialized)
+    {
+        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc);
+        if (pReNative->Core.offPc)
+        {
+            uint8_t const         idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+            PIEMNATIVEINSTR const pCodeBuf  = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 32 : 8);
+            off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxTmpReg, idxPcReg, pReNative->Core.offPc);
+            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxTmpReg);
+            iemNativeRegFreeTmp(pReNative, idxTmpReg);
+        }
+        else
+            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+        iemNativeRegFreeTmp(pReNative, idxPcReg);
+    }
+    return off;
+}
+
+# endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG */
+
 /**
  * Emits code to update the guest RIP value by adding the current offset since the start of the last RIP update.
@@ -5675 +5763 @@
 {
     Assert(pReNative->Core.offPc);
+    Log4(("offPc=%#x -> 0; off=%#x\n", pReNative->Core.offPc, off));
 # ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
     iemNativeDbgInfoAddNativeOffset(pReNative, off);
@@ -5687 +5776 @@
     off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
+# endif
 
     /* Free but don't flush the PC register. */
@@ -5702 +5794 @@
     return off;
 }
+
 #endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */
 
@@ -6431 +6524 @@
 #endif
 
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    pReNative->Core.fDebugPcInitialized = false;
+    Log4(("fDebugPcInitialized=false cimpl off=%#x (v2)\n", off));
+#endif
+
     /*
      * Check the status code.
@@ -6516 +6614 @@
 #else
 # error "port me"
 #endif
+
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    pReNative->Core.fDebugPcInitialized = false;
+    Log4(("fDebugPcInitialized=false todo off=%#x (v2)\n", off));
+#endif
 
@@ -8746 +8849 @@
         ENTRY(iem.s.idxTbCurInstr),
         ENTRY(iem.s.fSkippingEFlags),
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+        ENTRY(iem.s.uPcUpdatingDebug),
+#endif
 #ifdef VBOX_WITH_STATISTICS
         ENTRY(iem.s.StatNativeTlbHitsForFetch),
@@ -10090 +10196 @@
      * Actual work.
      */
-    Log2(("%u[%u]: %s%s\n", idxCurCall, pCallEntry->idxInstr, g_apszIemThreadedFunctions[pCallEntry->enmFunction],
-          pfnRecom ? "(recompiled)" : "(todo)"));
+    Log2(("%u[%u]: %s%s (off=%#x)\n", idxCurCall, pCallEntry->idxInstr,
+          g_apszIemThreadedFunctions[pCallEntry->enmFunction], pfnRecom ? "(recompiled)" : "(todo)", off));
     if (pfnRecom) /** @todo stats on this. */
     {
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
--- r105805
+++ r105853
@@ -809 +809 @@
         STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates",   "/IEM/CPU%u/re/NativePcUpdateTotal",   idCpu);
         STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
 # endif /* VBOX_WITH_STATISTICS */
+# ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
+                        "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
+# endif
+# ifdef VBOX_WITH_STATISTICS
 #  ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
         STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
trunk/src/VBox/VMM/include/IEMInternal.h
--- r105805
+++ r105853
@@ -112 +112 @@
 #if defined(DOXYGEN_RUNNING) || 1
 # define IEMNATIVE_WITH_DELAYED_PC_UPDATING
+#endif
+/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+ * Enabled delayed PC updating debugging code.
+ * This is an alternative to the ARM64-only IEMNATIVE_REG_FIXED_PC_DBG. */
+#if defined(DOXYGEN_RUNNING) || 0
+# define IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
 #endif
 
@@ -2270 +2276 @@
     /** Dummy entry for ppTbLookupEntryR3. */
     R3PTRTYPE(PIEMTB)       pTbLookupEntryDummyR3;
+#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    /** The debug code advances this register as if it was CPUMCTX::rip and we
+     *  didn't do delayed PC updating.  When CPUMCTX::rip is finally updated,
+     *  the result is compared with this value. */
+    uint64_t                uPcUpdatingDebug;
+#else
+    uint64_t                u64Placeholder;
+#endif
     /** @} */
 
@@ -2394 +2408 @@
     STAMCOUNTER             StatNativePcUpdateDelayed;
 
+    /** Native recompiler: Number of time we had complicated dirty shadow
+     *  register situations with the other branch in IEM_MC_ENDIF. */
+    STAMCOUNTER             StatNativeEndIfOtherBranchDirty;
+
     //#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
     /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
@@ -2519 +2537 @@
 
 #ifdef IEM_WITH_TLB_TRACE
+    uint64_t                au64Padding[4];
+#else
     uint64_t                au64Padding[6];
-#else
-    //uint64_t              au64Padding[0];
 #endif
 
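Note how the IEMCPU change keeps the structure's size and member offsets identical across build configurations: the new field is paired with an equally sized placeholder in the other branch, and the trailing padding array elsewhere shrinks by the same amount. The usual pattern, sketched with a toy struct (the names and sizes here are illustrative, not IEMCPU's):

    #include <cstdint>

    #define MY_DEBUG_FEATURE /* flip this off and the size must stay the same */

    struct Example
    {
        uint64_t uAlwaysThere;
    #ifdef MY_DEBUG_FEATURE
        uint64_t uDebugOnly;        /* like iem.s.uPcUpdatingDebug */
    #else
        uint64_t u64Placeholder;    /* keeps offsets of later members fixed */
    #endif
        uint64_t au64Padding[4];    /* shrunk to compensate, like au64Padding[6] -> [4] */
    };

    /* Layout must not depend on the feature define. */
    static_assert(sizeof(Example) == 6 * sizeof(uint64_t), "size drifted");

    int main() { return 0; }

Stable offsets matter here because the native code emitters bake RT_UOFFSETOF(VMCPU, ...) displacements into generated instructions, and assembly/other translation units must agree on them regardless of which debug defines they were compiled with.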
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
--- r105818
+++ r105853
@@ -1246 +1246 @@
     uint32_t                    offPc;
     /** Number of instructions where we could skip the updating. */
-    uint32_t                    cInstrPcUpdateSkipped;
+    uint8_t                     cInstrPcUpdateSkipped;
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+    /** Set after we've loaded PC into uPcUpdatingDebug at the first update. */
+    bool                        fDebugPcInitialized;
+    uint8_t                     abPadding[2];
+# else
+    uint8_t                     abPadding[3];
+# endif
 #endif
     /** Allocation bitmap for aHstRegs. */
@@ -1386 +1393 @@
     /** Set if we're in the "else" part, clear if we're in the "if" before it. */
     bool                        fInElse;
-    /** Set if the if-block unconditionally exited the TB. */
-    bool                        fIfExitTb;
-    /** Set if the else-block unconditionally exited the TB. */
-    bool                        fElseExitTb;
+    union
+    {
+        struct
+        {
+            /** Set if the if-block unconditionally exited the TB. */
+            bool                fIfExitTb;
+            /** Set if the else-block unconditionally exited the TB. */
+            bool                fElseExitTb;
+        };
+        /** Indexed by fInElse. */
+        bool                    afExitTb[2];
+    };
     bool                        afPadding[5];
     /** The label for the IEM_MC_ELSE. */
@@ -1712 +1727 @@
                                                         uint64_t fGstSimdShwExcept);
 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
+# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
+DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off);
+DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheckWithReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxPcReg);
+# endif
 DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcWritebackSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off);
 #endif
 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
 DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREG enmGstReg);
-DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstReg = UINT64_MAX);
-DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg);
+DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWriteEx(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                            PIEMNATIVECORESTATE pCore, IEMNATIVEGSTREG enmGstReg);
+DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                        uint64_t fFlushGstReg = UINT64_MAX);
+DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative,
+                                                                       uint32_t off, uint8_t idxHstReg);
 #endif
@@ -2326 +2349 @@
     Assert(pReNative->Core.bmHstRegsWithGstShadow & RT_BIT_32(idxHstReg));
 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
-    Assert(!(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
+    Assert(!(pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(enmGstReg)));
 #endif
 
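The IEMNATIVECOND change is a classic anonymous-union trick: the two flags remain accessible by name as fIfExitTb/fElseExitTb, while the overlaid array lets code index the right one directly with the fInElse boolean, exactly what the new iemNativeMarkCurCondBranchAsExiting helper does. Reduced to its essentials (anonymous structs inside unions are a widely supported extension in C++, standard in C11):

    #include <cassert>

    struct Cond
    {
        bool fInElse;
        union
        {
            struct
            {
                bool fIfExitTb;   /* overlays afExitTb[0] */
                bool fElseExitTb; /* overlays afExitTb[1] */
            };
            bool afExitTb[2];     /* indexed by fInElse */
        };
    };

    int main()
    {
        Cond c = {};
        c.fInElse = true;
        c.afExitTb[c.fInElse] = true;   /* marks whichever branch we are currently in */
        assert(c.fElseExitTb && !c.fIfExitTb);
        return 0;
    }

This turns the old four-line if/else in the emitters into a single indexed store without changing the structure's size or the meaning of the existing named fields.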
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
--- r105673
+++ r105853
@@ -4360 +4360 @@
  */
 DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitAddGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iAddend)
+iemNativeEmitAddGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iAddend, uint8_t iGprTmp = UINT8_MAX)
 {
 #if defined(RT_ARCH_AMD64)
@@ -4375 +4375 @@
         pCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
         pCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
+    RT_NOREF(iGprTmp);
 
 #elif defined(RT_ARCH_ARM64)
@@ -4386 +4387 @@
         if (uAbsAddend & 0xfffU)
             pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & 0xfff, false /*f64Bit*/);
+    }
+    else if (iGprTmp != UINT8_MAX)
+    {
+        off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, iAddend);
+        pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
     }
     else
@@ -8266 +8272 @@
 
 /**
+ * Helper for marking the current conditional branch as exiting the TB.
+ *
+ * This simplifies the state consolidation later when we reach the IEM_MC_ENDIF.
+ */
+DECL_FORCE_INLINE(void) iemNativeMarkCurCondBranchAsExiting(PIEMRECOMPILERSTATE pReNative)
+{
+    uint8_t idxCondDepth = pReNative->cCondDepth;
+    if (idxCondDepth)
+    {
+        idxCondDepth--;
+        pReNative->aCondStack[idxCondDepth].afExitTb[pReNative->aCondStack[idxCondDepth].fInElse] = true;
+    }
+}
+
+
+/**
  * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
  */
@@ -8273 +8295 @@
 {
     Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
+
 #if defined(IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE) && defined(RT_ARCH_AMD64)
     /* jcc rel32 */
@@ -8380 +8403 @@
 {
     Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
+
+    iemNativeMarkCurCondBranchAsExiting(pReNative);
+
 #ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
 # ifdef RT_ARCH_AMD64
@@ -8409 +8435 @@
 {
     Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
+
+    iemNativeMarkCurCondBranchAsExiting(pReNative);
+
 #ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
 # ifdef RT_ARCH_AMD64
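The new iGprTmp parameter gives the ARM64 path of iemNativeEmitAddGpr32ImmEx an escape hatch: ARM64 ADD/SUB immediates encode only a 12-bit value, optionally shifted left by 12, so the emitter can cover at most a 24-bit magnitude with a two-instruction pair; anything larger must be materialized in a temporary register and added register-to-register. A small checker for the encodable cases — my own helper written for illustration, not a VBox function, and assuming the two-chunk strategy described above:

    #include <cstdint>
    #include <cstdio>

    /* True if |iAddend| can be handled without a temporary register:
       one 12-bit chunk, one 12-bit chunk shifted left by 12, or both
       (emitted as two ADD/SUB immediate instructions). */
    static bool fitsAddSubImmPair(int32_t iAddend)
    {
        uint32_t const uAbs = iAddend < 0 ? (uint32_t)-(int64_t)iAddend : (uint32_t)iAddend;
        return (uAbs & ~UINT32_C(0xffffff)) == 0; /* bits 31:24 must be clear */
    }

    int main()
    {
        printf("%d\n", fitsAddSubImmPair(0xfff));     /* 1: single ADD #imm12 */
        printf("%d\n", fitsAddSubImmPair(0x123456));  /* 1: ADD #0x123<<12, then ADD #0x456 */
        printf("%d\n", fitsAddSubImmPair(0x1234567)); /* 0: needs the iGprTmp fallback */
        return 0;
    }

The default iGprTmp = UINT8_MAX keeps every existing call site source-compatible; on AMD64 the parameter is simply RT_NOREF'ed since an add with a full imm32 is always encodable there.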