- Timestamp: Oct 17, 2024 12:02:12 PM (6 weeks ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp
r106315 → r106443

@@ -1473 +1473 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<false>(pReNative, off, &TlbState,
-                                        IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX : X86_SREG_CS,
-                                        1 /*cbMem*/, 0 /*fAlignMask*/, IEM_ACCESS_TYPE_EXEC,
-                                        idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);
+    off = iemNativeEmitTlbLookup<false, 1 /*cbMem*/, 0 /*fAlignMask*/,
+                                 IEM_ACCESS_TYPE_EXEC>(pReNative, off, &TlbState,
+                                                       IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX : X86_SREG_CS,
+                                                       idxLabelTlbLookup, idxLabelTlbMiss, idxRegGCPhys, offInstr);

 # ifdef IEM_WITH_TLB_STATISTICS

@@ -1771 +1771 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<false, true>(pReNative, off, &TlbState, fIsFlat ? UINT8_MAX : X86_SREG_CS,
-                                              1 /*cbMem*/, 0 /*fAlignMask*/, IEM_ACCESS_TYPE_EXEC,
-                                              idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);
+    off = iemNativeEmitTlbLookup<false, 1 /*cbMem*/, 0 /*fAlignMask*/,
+                                 IEM_ACCESS_TYPE_EXEC, true>(pReNative, off, &TlbState, fIsFlat ? UINT8_MAX : X86_SREG_CS,
+                                                             idxLabelTlbLookup, idxLabelTlbMiss, idxRegDummy);

 # ifdef IEM_WITH_TLB_STATISTICS
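The call sites above move the access size, alignment mask and access type from runtime arguments of iemNativeEmitTlbLookup() into template parameters. The following is a minimal, self-contained sketch of that pattern; the names (EmitState, emitTlbLookupDyn/Tmpl, Access_*) are illustrative stand-ins and not the VirtualBox API, and plain C++17 constructs stand in for the IPRT macros.

#include <cstdint>
#include <cstdio>

enum AccessType : uint32_t { Access_Read = 1, Access_Write = 2, Access_Exec = 4 };

struct EmitState { uint32_t cInstrs = 0; };

/* Before: everything is a runtime parameter, so every check is a runtime branch
   taken each time code is emitted. */
uint32_t emitTlbLookupDyn(EmitState *pState, uint32_t off,
                          uint8_t cbMem, uint32_t fAlignMask, uint32_t fAccess)
{
    if (fAccess & Access_Write)
        pState->cInstrs++;              /* pretend: emit write/dirty handling */
    if (cbMem > 1 && fAlignMask)
        pState->cInstrs++;              /* pretend: emit an alignment check   */
    return off + pState->cInstrs;
}

/* After: the same knobs are non-type template parameters; each instantiation is
   specialized for one access shape, dead branches disappear, and invariants can
   be checked at compile time. */
template<uint8_t a_cbMem, uint32_t a_fAlignMask, uint32_t a_fAccess>
uint32_t emitTlbLookupTmpl(EmitState *pState, uint32_t off)
{
    static_assert(a_cbMem >= 1 && a_cbMem <= 64, "sane access size");
    if constexpr ((a_fAccess & Access_Write) != 0)
        pState->cInstrs++;
    if constexpr (a_cbMem > 1 && a_fAlignMask != 0)
        pState->cInstrs++;
    return off + pState->cInstrs;
}

int main()
{
    EmitState St1, St2;
    uint32_t const off1 = emitTlbLookupDyn(&St1, 0, 1 /*cbMem*/, 0 /*fAlignMask*/, Access_Exec);
    uint32_t const off2 = emitTlbLookupTmpl<1 /*cbMem*/, 0 /*fAlignMask*/, Access_Exec>(&St2, 0);
    std::printf("dyn=%u tmpl=%u\n", off1, off2);
    return 0;
}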
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r106432 → r106443

@@ -1779 +1779 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1,
-                                       IEM_ACCESS_TYPE_WRITE, idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    off = iemNativeEmitTlbLookup<true, cbMem, cbMem - 1, IEM_ACCESS_TYPE_WRITE>(pReNative, off, &TlbState, iSegReg,
+                                                                                idxLabelTlbLookup, idxLabelTlbMiss,
+                                                                                idxRegMemResult);

     /*

@@ -2455 +2456 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ,
-                                       idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    off = iemNativeEmitTlbLookup<true, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ>(pReNative, off, &TlbState, iSegReg,
+                                                                               idxLabelTlbLookup, idxLabelTlbMiss,
+                                                                               idxRegMemResult);

     /*

@@ -7608 +7610 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, a_cbMem, a_fAlignMaskAndCtl,
-                                       a_enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ,
-                                       idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult, offDisp);
+    off = iemNativeEmitTlbLookup<true, a_cbMem, a_fAlignMaskAndCtl,
+                                 a_enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ
+                                >(pReNative, off, &TlbState, iSegReg, idxLabelTlbLookup, idxLabelTlbMiss,
+                                  idxRegMemResult, offDisp);

     /*

@@ -8263 +8266 @@
      * Assert sanity.
      */
+    AssertCompile(a_cBitsVar == 16 || a_cBitsVar == 32 || a_cBitsVar == 64);
+    AssertCompile(a_cBitsFlat == 0 || a_cBitsFlat == 32 || a_cBitsFlat == 64);
+    AssertCompile(!a_fIsSegReg || a_cBitsVar < 64);
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarValue);
     PIEMNATIVEVAR const pVarValue = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVarValue)];

@@ -8310 +8316 @@
      * (Code structure is very similar to that of PUSH)
      */
+    RT_CONSTEXPR
     uint8_t const cbMem = a_cBitsVar / 8;
     bool const fIsIntelSeg = a_fIsSegReg && IEM_IS_GUEST_CPU_INTEL(pReNative->pVCpu);
-    uint8_t const cbMemAccess = !fIsIntelSeg || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_16BIT
+    uint8_t const cbMemAccess = !a_fIsSegReg || !fIsIntelSeg || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_16BIT
                               ? cbMem : sizeof(uint16_t);
     uint8_t const idxRegRsp = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xSP),

@@ -8473 +8480 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMemAccess, cbMemAccess - 1,
-                                       IEM_ACCESS_TYPE_WRITE, idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    if (!a_fIsSegReg || cbMemAccess == cbMem)
+    {
+        Assert(cbMemAccess == cbMem);
+        off = iemNativeEmitTlbLookup<true, cbMem, cbMem - 1, IEM_ACCESS_TYPE_WRITE>(pReNative, off, &TlbState,
+                                                                                    iSegReg, idxLabelTlbLookup,
+                                                                                    idxLabelTlbMiss, idxRegMemResult);
+    }
+    else
+    {
+        Assert(cbMemAccess == sizeof(uint16_t));
+        off = iemNativeEmitTlbLookup<true, sizeof(uint16_t), sizeof(uint16_t) - 1,
+                                     IEM_ACCESS_TYPE_WRITE>(pReNative, off, &TlbState, iSegReg,
+                                                            idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    }

     /*

@@ -8492 +8511 @@
             break;
         case 4:
-            if (!fIsIntelSeg)
+            if (!a_fIsSegReg || !fIsIntelSeg)
                 off = iemNativeEmitStoreGpr32ByGprEx(pCodeBuf, off, idxRegValue, idxRegMemResult);
             else

@@ -8816 +8835 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ,
-                                       idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    off = iemNativeEmitTlbLookup<true, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ>(pReNative, off, &TlbState, iSegReg,
+                                                                               idxLabelTlbLookup, idxLabelTlbMiss,
+                                                                               idxRegMemResult);

     /*

@@ -9414 +9434 @@
      * TlbLookup:
      */
-    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, a_cbMem, a_fAlignMaskAndCtl, a_fAccess,
-                                       idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
+    off = iemNativeEmitTlbLookup<true, a_cbMem, a_fAlignMaskAndCtl, a_fAccess>(pReNative, off, &TlbState, iSegReg,
+                                                                               idxLabelTlbLookup, idxLabelTlbMiss,
+                                                                               idxRegMemResult);
 # ifdef IEM_WITH_TLB_STATISTICS
     off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
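The PUSH-sreg hunk above (around line 8473) can no longer pass the runtime-computed cbMemAccess to the now-templated lookup, so the call site branches once and selects one of two fixed instantiations. Below is a rough sketch of that call-site pattern with made-up names (emitStore, emitPush) rather than the real emitter API.

#include <cstdint>
#include <cassert>
#include <cstdio>

template<uint8_t a_cbMem>
uint32_t emitStore(uint32_t off)                 /* stand-in for a size-templated store emitter */
{
    static_assert(a_cbMem == 2 || a_cbMem == 4 || a_cbMem == 8, "supported store sizes");
    return off + a_cbMem;
}

template<uint8_t a_cbMem, bool a_fIsSegReg>
uint32_t emitPush(uint32_t off, bool fIsIntelSeg, bool fIs16Bit)
{
    /* Intel CPUs only write 16 bits of a segment register outside 16-bit mode,
       so the size actually stored is only known at runtime. */
    uint8_t const cbMemAccess = !a_fIsSegReg || !fIsIntelSeg || fIs16Bit ? a_cbMem : uint8_t(sizeof(uint16_t));

    if (!a_fIsSegReg || cbMemAccess == a_cbMem)  /* common case: full-width store */
        off = emitStore<a_cbMem>(off);
    else                                         /* Intel segment push in 32/64-bit mode */
    {
        assert(cbMemAccess == sizeof(uint16_t));
        off = emitStore<sizeof(uint16_t)>(off);
    }
    return off;
}

int main()
{
    std::printf("%u\n", emitPush<4 /*cbMem*/, true /*fIsSegReg*/>(0, true /*Intel*/, false /*!16-bit*/));
    return 0;
}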
trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h
r106407 → r106443

@@ -305 +305 @@
  * @param       pTlbState           .
  * @param       iSegReg             .
- * @param       cbMem               .
- * @param       fAlignMaskAndCtl    The low 8-bit is the alignment mask, ie. a
+ * @param       idxLabelTlbLookup   .
+ * @param       idxLabelTlbMiss     .
+ * @param       idxRegMemResult     .
+ * @param       offDisp             .
+ * @tparam      a_cbMem             .
+ * @tparam      a_fAlignMaskAndCtl  The low 8-bit is the alignment mask, ie. a
  *                                  128-bit aligned access passes 15. This is only
  *                                  applied to ring-3 code, when dictated by the
…
  *                                  tlbmiss on anything out of alignment according
  *                                  to the mask in the low 8 bits.
- * @param       fAccess             .
- * @param       idxLabelTlbLookup   .
- * @param       idxLabelTlbMiss     .
- * @param       idxRegMemResult     .
- * @param       offDisp             .
+ * @tparam      a_fAccess           .
  * @tparam      a_fDataTlb          .
  * @tparam      a_fNoReturn         .
  */
-template<bool const a_fDataTlb, bool const a_fNoReturn = false>
+template<bool const a_fDataTlb, const uint8_t a_cbMem, uint32_t a_fAlignMaskAndCtl, uint32_t a_fAccess,
+         bool const a_fNoReturn = false>
 DECL_INLINE_THROW(uint32_t)
 iemNativeEmitTlbLookup(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEEMITTLBSTATE const * const pTlbState,
-                       uint8_t iSegReg, uint8_t cbMem, uint32_t fAlignMaskAndCtl, uint32_t fAccess,
-                       uint32_t idxLabelTlbLookup, uint32_t idxLabelTlbMiss, uint8_t idxRegMemResult,
+                       uint8_t iSegReg, uint32_t idxLabelTlbLookup, uint32_t idxLabelTlbMiss, uint8_t idxRegMemResult,
                        uint8_t offDisp = 0)
 {

@@ -356 +356 @@
             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
                                                  (uint32_t)(pTlbState->uAbsPtr + offDisp));
-        else if (cbMem == 1)
+        else if RT_CONSTEXPR_IF(a_cbMem == 1)
             off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, pTlbState->idxReg2);
         else

@@ -378 +378 @@
         if (pTlbState->idxRegPtr != UINT8_MAX)
             off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1,
-                                               cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
+                                               a_cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
         else
             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1,
-                                                 (uint32_t)(pTlbState->uAbsPtr + offDisp + cbMem - 1)); /* fSkip=true on overflow. */
+                                                 (uint32_t)(pTlbState->uAbsPtr + offDisp + a_cbMem - 1)); /* fSkip=true on overflow. */
         /* jbe tlbmiss */
         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);

@@ -400 +400 @@
     uint8_t const idxRegFlatPtr = iSegReg != UINT8_MAX || pTlbState->idxRegPtr == UINT8_MAX || offDisp != 0
                                 ? idxRegMemResult : pTlbState->idxRegPtr; /* (not immediately ready for tlblookup use) */
-    uint8_t const fAlignMask = a_fDataTlb ? (uint8_t)fAlignMaskAndCtl : 0;
+    RT_CONSTEXPR
+    uint8_t const fAlignMask = a_fDataTlb ? (uint8_t)(a_fAlignMaskAndCtl & 0xff) : 0;
     if (a_fDataTlb)
     {
-        Assert(!(fAlignMaskAndCtl & ~(UINT32_C(0xff) | IEM_MEMMAP_F_ALIGN_SSE | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC)));
+        AssertCompile(!(a_fAlignMaskAndCtl & ~(UINT32_C(0xff) | IEM_MEMMAP_F_ALIGN_SSE | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC)));
         Assert(RT_IS_POWER_OF_TWO(fAlignMask + 1U));
-        Assert(cbMem == fAlignMask + 1U || !(fAccess & IEM_ACCESS_ATOMIC));
-        Assert(cbMem < 128); /* alignment test assumptions */
+        Assert(a_cbMem == fAlignMask + 1U || !(a_fAccess & IEM_ACCESS_ATOMIC));
+        Assert(a_cbMem < 128); /* alignment test assumptions */
     }

@@ -412 +413 @@
     uint32_t offFixupMisalignedAccessJmpBack = UINT32_MAX;
     if (   a_fDataTlb
-        && !(fAlignMaskAndCtl & ~UINT32_C(0xff))
-        && !(fAccess & IEM_ACCESS_ATOMIC)
-        && cbMem > 1
-        && RT_IS_POWER_OF_TWO(cbMem)
+        && !(a_fAlignMaskAndCtl & ~UINT32_C(0xff))
+        && !(a_fAccess & IEM_ACCESS_ATOMIC)
+        && a_cbMem > 1
+        && RT_IS_POWER_OF_TWO(a_cbMem)
         && !(pReNative->fExec & IEM_F_X86_AC))
     {

@@ -422 +423 @@
         /* reg1 = regflat & 0xfff */
         off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK);
-        /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
-        off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - cbMem);
+        /* cmp reg1, GUEST_PAGE_SIZE - a_cbMem */
+        off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - a_cbMem);
         /* jbe short jmpback */
         offFixupMisalignedAccessJmpBack = off;

@@ -542 +543 @@
        For write access this means a writable data segment.
        For read-only accesses this means a readable code segment or any data segment. */
-    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+    if RT_CONSTEXPR_IF((a_fAccess & IEM_ACCESS_TYPE_WRITE) != 0)
     {
         uint32_t const fMustBe1 = X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_WRITE;

@@ -589 +590 @@
     /* If we're accessing more than one byte or if we're working with a non-zero offDisp,
        put the last address we'll be accessing in idxReg2 (64-bit). */
-    if ((cbMem > 1 || offDisp != 0) && pTlbState->idxRegPtr != UINT8_MAX)
+    if ((a_cbMem > 1 || offDisp != 0) && pTlbState->idxRegPtr != UINT8_MAX)
     {
         if (!offDisp)
-            /* reg2 = regptr + cbMem - 1; 64-bit result so we can fend of wraparounds/overflows. */
-            off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ cbMem - 1);
+            /* reg2 = regptr + a_cbMem - 1; 64-bit result so we can fend of wraparounds/overflows. */
+            off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off,
+                                                 pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ a_cbMem - 1);
         else
         {
-            /* reg2 = (uint32_t)(regptr + offDisp) + cbMem - 1;. */
+            /* reg2 = (uint32_t)(regptr + offDisp) + a_cbMem - 1;. */
             off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off,
                                                    pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ + offDisp);
-            off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, cbMem - 1);
+            off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, a_cbMem - 1);
         }
     }

@@ -608 +610 @@
      * we need to check that code/data=0 and expanddown=1 before continuing.
      */
-    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+    if RT_CONSTEXPR_IF((a_fAccess & IEM_ACCESS_TYPE_WRITE) != 0)
     {
         /* test segattrs, X86_SEL_TYPE_DOWN */

@@ -631 +633 @@
         if (pTlbState->idxRegPtr != UINT8_MAX)
             off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
-                                               cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
+                                               a_cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
         else
             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
-                                                 (uint32_t)pTlbState->uAbsPtr + offDisp + cbMem - 1U); /* fSkip=true on overflow. */
+                                                 (uint32_t)pTlbState->uAbsPtr + offDisp + a_cbMem - 1U); /* fSkip=true on overflow. */
         /* jbe tlbmiss */
         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);

@@ -699 +701 @@
      *
      * The caller informs us about about SSE/AVX aligned accesses via the
-     * upper bits of fAlignMaskAndCtl and atomic accesses via fAccess.
+     * upper bits of a_fAlignMaskAndCtl and atomic accesses via a_fAccess.
      */
     if (a_fDataTlb)
…
         {
 #ifdef RT_ARCH_ARM64
-            if (cbMem == 2)
+            if RT_CONSTEXPR_IF(a_cbMem == 2)
             {
                 /* tbnz regflatptr, #0, tlbmiss */
…
             {
                 /* test regflat, fAlignMask */
-                off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, cbMem - 1);
+                off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, a_cbMem - 1);
                 /* jnz tlbmiss */
                 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offMisalignedAccess, kIemNativeInstrCond_ne);

@@ -729 +731 @@
          */
         bool const fStrictAlignmentCheck = fAlignMask
-                                        && (   (fAlignMaskAndCtl & ~UINT32_C(0xff))
-                                            || (fAccess & IEM_ACCESS_ATOMIC)
+                                        && (   (a_fAlignMaskAndCtl & ~UINT32_C(0xff))
+                                            || (a_fAccess & IEM_ACCESS_ATOMIC)
                                             || (pReNative->fExec & IEM_F_X86_AC) );
         if (fStrictAlignmentCheck)

@@ -756 +758 @@
          * alignment check above.
          */
-        if (   cbMem > 1
+        if (   a_cbMem > 1
             && (   !fStrictAlignmentCheck
-                || cbMem > fAlignMask + 1U))
+                || a_cbMem > fAlignMask + 1U))
         {
             /* reg1 = regflat & 0xfff */
             off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK);
-            /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
-            off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - cbMem);
+            /* cmp reg1, GUEST_PAGE_SIZE - a_cbMem */
+            off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - a_cbMem);
 #ifndef IEM_WITH_TLB_STATISTICS
             /* ja tlbmiss */
…
         }
         else
-            Assert(fAlignMaskAndCtl == 0);
+            Assert(a_fAlignMaskAndCtl == 0);

     /*

@@ -1003 +1005 @@
     uint64_t fTlbe = IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PT_NO_ACCESSED
                    | fNoUser;
-    if (fAccess & IEM_ACCESS_TYPE_EXEC)
+    if RT_CONSTEXPR_IF((a_fAccess & IEM_ACCESS_TYPE_EXEC) != 0)
         fTlbe |= IEMTLBE_F_PT_NO_EXEC /*| IEMTLBE_F_PG_NO_READ?*/;
-    if (fAccess & IEM_ACCESS_TYPE_READ)
+    if RT_CONSTEXPR_IF((a_fAccess & IEM_ACCESS_TYPE_READ) != 0)
         fTlbe |= IEMTLBE_F_PG_NO_READ;
-    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+    if RT_CONSTEXPR_IF((a_fAccess & IEM_ACCESS_TYPE_WRITE) != 0)
         fTlbe |= IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY;
     off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, fTlbe);

@@ -1183 +1185 @@
      *
      * It's like the state logging, so parameters are passed on the stack.
-     * iemNativeHlpAsmSafeWrapCheckTlbLookup(pVCpu, result, addr, seg | (cbMem << 8) | (fAccess << 16))
+     * iemNativeHlpAsmSafeWrapCheckTlbLookup(pVCpu, result, addr, seg | (a_cbMem << 8) | (a_fAccess << 16))
      */
     if (a_fDataTlb)
     {
 # ifdef RT_ARCH_AMD64
-        if (!offDisp && !(fAccess & 0x8000))
-        {
-            /* push seg | (cbMem << 8) | (fAccess << 16) */
+        if (!offDisp && !(a_fAccess & 0x8000))
+        {
+            /* push seg | (a_cbMem << 8) | (a_fAccess << 16) */
             pCodeBuf[off++] = 0x68;
             pCodeBuf[off++] = iSegReg;
-            pCodeBuf[off++] = cbMem;
-            pCodeBuf[off++] = RT_BYTE1(fAccess);
-            pCodeBuf[off++] = RT_BYTE2(fAccess);
-        }
-        else
-        {
-            /* mov reg1, seg | (cbMem << 8) | (fAccess << 16) | (offDisp << 32) */
+            pCodeBuf[off++] = a_cbMem;
+            pCodeBuf[off++] = RT_BYTE1(a_fAccess);
+            pCodeBuf[off++] = RT_BYTE2(a_fAccess);
+        }
+        else
+        {
+            /* mov reg1, seg | (a_cbMem << 8) | (a_fAccess << 16) | (offDisp << 32) */
             off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1,
-                                            iSegReg | ((uint32_t)cbMem << 8) | (fAccess << 16) | ((uint64_t)offDisp << 32));
+                                            iSegReg | ((uint32_t)a_cbMem << 8) | (a_fAccess << 16) | ((uint64_t)offDisp << 32));
             /* push reg1 */
             if (pTlbState->idxReg1 >= 8)
…
 # elif defined(RT_ARCH_ARM64)
         /* Use the temporary registers for setting up the "call frame" and making the call. */
-        /* reg1 = seg | (cbMem << 8) | (fAccess << 16) */
-        pCodeBuf[off++] = Armv8A64MkInstrMovZ(pTlbState->idxReg1, RT_MAKE_U16(iSegReg, cbMem));
-        pCodeBuf[off++] = Armv8A64MkInstrMovK(pTlbState->idxReg1, RT_LO_U16(fAccess), 1);
+        /* reg1 = seg | (a_cbMem << 8) | (a_fAccess << 16) */
+        pCodeBuf[off++] = Armv8A64MkInstrMovZ(pTlbState->idxReg1, RT_MAKE_U16(iSegReg, a_cbMem));
+        pCodeBuf[off++] = Armv8A64MkInstrMovK(pTlbState->idxReg1, RT_LO_U16(a_fAccess), 1);
         if (offDisp)
             pCodeBuf[off++] = Armv8A64MkInstrMovK(pTlbState->idxReg1, offDisp, 2);
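With a_fAccess and a_cbMem available as template constants, the header above can turn runtime branches into RT_CONSTEXPR_IF and runtime Assert()s into AssertCompile(), so per-access-type code such as the IEMTLBE flag setup folds to a constant in each instantiation. The sketch below illustrates that effect using plain C++17 if constexpr and static_assert as stand-ins for the IPRT macros; the flag names and values are invented for illustration.

#include <cstdint>
#include <cstdio>

constexpr uint32_t Access_Read  = 0x1;
constexpr uint32_t Access_Write = 0x2;
constexpr uint32_t Access_Exec  = 0x4;

constexpr uint64_t Tlbe_NoExec  = 0x10;
constexpr uint64_t Tlbe_NoRead  = 0x20;
constexpr uint64_t Tlbe_NoWrite = 0x40;

template<uint32_t a_fAccess>
uint64_t buildTlbeMask(uint64_t fBase)
{
    static_assert((a_fAccess & ~(Access_Read | Access_Write | Access_Exec)) == 0, "unknown access bits");
    uint64_t fTlbe = fBase;
    if constexpr ((a_fAccess & Access_Exec) != 0)   /* dead code eliminated per instantiation */
        fTlbe |= Tlbe_NoExec;
    if constexpr ((a_fAccess & Access_Read) != 0)
        fTlbe |= Tlbe_NoRead;
    if constexpr ((a_fAccess & Access_Write) != 0)
        fTlbe |= Tlbe_NoWrite;
    return fTlbe;                                   /* compiles down to "return fBase | constant" */
}

int main()
{
    std::printf("%#llx\n", (unsigned long long)buildTlbeMask<Access_Read | Access_Write>(0x1));
    return 0;
}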