Changeset 100222 in vbox
Timestamp:
    Jun 20, 2023 2:40:48 AM
Location:
    trunk/src/VBox/VMM
Files:
    8 edited
Legend (diff notation used below):
    + added line
    - removed line
    unprefixed lines are unchanged context; "..." marks elided context
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r100060 → r100222)

 
+/**
+ * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
+ * inject a pending TRPM trap.
+ */
+VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
+{
+    Assert(TRPMHasTrap(pVCpu));
+
+    if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
+        && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
+    {
+        /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+        bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
+        if (fIntrEnabled)
+        {
+            if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
+                fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
+            else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
+                fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
+            else
+            {
+                Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
+                fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
+            }
+        }
+#else
+        bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
+#endif
+        if (fIntrEnabled)
+        {
+            uint8_t     u8TrapNo;
+            TRPMEVENT   enmType;
+            uint32_t    uErrCode;
+            RTGCPTR     uCr2;
+            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
+            AssertRC(rc2);
+            Assert(enmType == TRPM_HARDWARE_INT);
+            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
+
+            TRPMResetTrap(pVCpu);
+
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+            /* Injecting an event may cause a VM-exit. */
+            if (   rcStrict != VINF_SUCCESS
+                && rcStrict != VINF_IEM_RAISED_XCPT)
+                return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+#else
+            NOREF(rcStrict);
+#endif
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
 VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
 {
...
         else
         {
-            if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
-                && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
-            {
-                /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
-                [... interrupt-injection body identical to the new iemExecInjectPendingTrap above, removed ...]
-            }
+            VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
+            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            { /*likely */ }
+            else
+                return rcStrict;
         }
 
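The refactoring pulls the interrupt-delivery gating that used to be inlined in IEMExecLots into one shared helper. A minimal, self-contained sketch of that gating order, with the CPUM queries reduced to plain booleans (hypothetical names, not the VBox API; the GIF/nested checks only exist in nested-hwvirt builds):

    #include <stdbool.h>

    typedef struct GATESTATE
    {
        bool fInterruptShadow;    /* e.g. the instruction after MOV SS / STI   */
        bool fNmiBlocking;        /* inside an NMI-blocking window             */
        bool fGif;                /* SVM global interrupt flag (nested hwvirt) */
        bool fNestedGuest;        /* executing a VMX non-root / SVM guest      */
        bool fNestedPhysIntrOk;   /* nested controls allow physical interrupts */
        bool fRFlagsIf;           /* plain RFLAGS.IF                           */
    } GATESTATE;

    /* Mirrors iemExecInjectPendingTrap's checks: inhibition windows first,
     * then GIF, then either the nested-hwvirt controls or plain IF. */
    static bool canInjectHardwareInterrupt(const GATESTATE *pSt)
    {
        if (pSt->fInterruptShadow || pSt->fNmiBlocking)
            return false;                      /* inhibited: retry later */
        if (!pSt->fGif)
            return false;                      /* GIF gates everything   */
        if (pSt->fNestedGuest)
            return pSt->fNestedPhysIntrOk;     /* VMX/SVM controls win   */
        return pSt->fRFlagsIf;                 /* plain x86: just IF     */
    }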
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp (r100108 → r100222)

  * @{
  */
+
+
+/**
+ * Implements a pop [mem16].
+ */
+IEM_CIMPL_DEF_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
+{
+    uint16_t  u16Value;
+    RTUINT64U TmpRsp;
+    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
+            return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+        }
+    }
+    return rcStrict;
+}
+
+
+/**
+ * Implements a pop [mem32].
+ */
+IEM_CIMPL_DEF_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
+{
+    uint32_t  u32Value;
+    RTUINT64U TmpRsp;
+    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+    VBOXSTRICTRC rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEffDst, u32Value);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
+            return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+        }
+    }
+    return rcStrict;
+}
+
+
+/**
+ * Implements a pop [mem64].
+ */
+IEM_CIMPL_DEF_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
+{
+    uint64_t  u64Value;
+    RTUINT64U TmpRsp;
+    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
+    VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrEffDst, u64Value);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
+            return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
+        }
+    }
+    return rcStrict;
+}
+
 
 /**
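All three helpers share the same commit discipline: the pop runs against a local RSP copy (TmpRsp), and the guest RSP is written back only after the store to [mem] has succeeded, so a fault on the destination leaves the stack pointer untouched and the instruction can restart cleanly. The shape, reduced to a freestanding sketch with hypothetical names (not the IEM API):

    #include <stdint.h>

    typedef int STATUS;
    #define STATUS_OK 0

    /* pfnPop reads the value at *puTmpRsp and advances the copy; pfnStore
     * may fail (think #PF on the destination).  RSP is committed last. */
    static STATUS popToMem(uint64_t *puGuestRsp,
                           STATUS (*pfnPop)(uint64_t *puTmpRsp, uint64_t *puValue),
                           STATUS (*pfnStore)(uint64_t uValue))
    {
        uint64_t uTmpRsp = *puGuestRsp;        /* work on a copy            */
        uint64_t uValue;
        STATUS rc = pfnPop(&uTmpRsp, &uValue); /* stack read + advance copy */
        if (rc == STATUS_OK)
        {
            rc = pfnStore(uValue);             /* destination store         */
            if (rc == STATUS_OK)
                *puGuestRsp = uTmpRsp;         /* commit only on success    */
        }
        return rc;                             /* failure: RSP unchanged    */
    }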
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h (r100148 → r100222)

     IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
 
-#ifndef TST_IEM_CHECK_MC
+#if 1 /* This can be compiled, optimize later if needed. */
+    switch (pVCpu->iem.s.enmEffOpSize)
+    {
+        case IEMMODE_16BIT:
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
+            IEM_MC_ARG(      RTGCPTR, GCPtrEffDst,                         1);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2 << 8);
+            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+            IEM_MC_CALL_CIMPL_2(0, iemCImpl_pop_mem16, iEffSeg, GCPtrEffDst);
+            IEM_MC_END();
+        }
+
+        case IEMMODE_32BIT:
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
+            IEM_MC_ARG(      RTGCPTR, GCPtrEffDst,                         1);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4 << 8);
+            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+            IEM_MC_CALL_CIMPL_2(0, iemCImpl_pop_mem32, iEffSeg, GCPtrEffDst);
+            IEM_MC_END();
+        }
+
+        case IEMMODE_64BIT:
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
+            IEM_MC_ARG(      RTGCPTR, GCPtrEffDst,                         1);
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 8 << 8);
+            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+            IEM_MC_CALL_CIMPL_2(0, iemCImpl_pop_mem64, iEffSeg, GCPtrEffDst);
+            IEM_MC_END();
+        }
+
+        IEM_NOT_REACHED_DEFAULT_CASE_RET();
+    }
+
+#else
+# ifndef TST_IEM_CHECK_MC
     /* Calc effective address with modified ESP. */
     /** @todo testcase */
...
     return rcStrict;
 
-#else
+# else
     return VERR_IEM_IPE_2;
+# endif
 #endif
 }
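Note the third argument to IEM_MC_CALC_RM_EFF_ADDR: 2 << 8, 4 << 8 or 8 << 8 rather than a plain immediate size. The shifts suggest the pop operand size rides in the second byte of the combined parameter, so the effective address is computed with the stack pointer already advanced past the popped word, matching the x86 rule that an xSP-relative pop [mem] uses the post-pop xSP (the older path below the #else carries a /** @todo testcase */ for exactly this). That reading is an assumption; a tiny decode of the packed values:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed packing: low byte = size of any immediate operand (0 here),
     * second byte = bytes to add to RSP before using it in the address. */
    int main(void)
    {
        static uint32_t const s_aPacked[] = { 2 << 8, 4 << 8, 8 << 8 };
        for (unsigned i = 0; i < 3; i++)
            printf("cbImm=%u cbRspAdjust=%u\n",
                   s_aPacked[i] & 0xff, (s_aPacked[i] >> 8) & 0xff);
        return 0;   /* prints cbImm=0 with adjustments 2, 4 and 8 */
    }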
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp (r100202 → r100222)

 #ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
-# define LOG_GROUP LOG_GROUP_IEM
+# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
 #endif
 #define VMCPU_INCL_CPUM_GST_CTX
...
  * Translation block management.
  */
+typedef struct IEMTBCACHE
+{
+    uint32_t cHash;
+    uint32_t uHashMask;
+    PIEMTB   apHash[_64K];
+} IEMTBCACHE;
+
+static IEMTBCACHE g_TbCache = { _64K, 0xffff, }; /**< Quick and dirty. */
+
+#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
+    ( ((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
+
 
 /**
...
-static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPc, uint64_t uPc, uint32_t fExtraFlags)
-{
-    RT_NOREF(pVM, pVCpu, GCPhysPc, uPc, fExtraFlags);
-    return NULL;
+static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPc, uint32_t fExtraFlags)
+{
+    uint32_t const fFlags  = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK) | fExtraFlags | IEMTB_F_STATE_READY;
+    uint32_t const idxHash = IEMTBCACHE_HASH(&g_TbCache, fFlags, GCPhysPc);
+    Log10(("TB lookup: idxHash=%#x fFlags=%#x GCPhysPc=%RGp\n", idxHash, fFlags, GCPhysPc));
+    PIEMTB pTb = g_TbCache.apHash[idxHash];
+    while (pTb)
+    {
+        if (pTb->GCPhysPc == GCPhysPc)
+        {
+            if (pTb->fFlags == fFlags)
+            {
+                if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
+                {
+#ifdef VBOX_WITH_STATISTICS
+                    pVCpu->iem.s.cTbLookupHits++;
+#endif
+                    return pTb;
+                }
+                Log11(("TB miss: CS: %#x, wanted %#x\n", pTb->x86.fAttr, (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u));
+            }
+            else
+                Log11(("TB miss: fFlags: %#x, wanted %#x\n", pTb->fFlags, fFlags));
+        }
+        else
+            Log11(("TB miss: GCPhysPc: %#x, wanted %#x\n", pTb->GCPhysPc, GCPhysPc));
+
+        pTb = pTb->pNext;
+    }
+    RT_NOREF(pVM);
+    pVCpu->iem.s.cTbLookupMisses++;
+    return pTb;
+}
+
+
+static void iemThreadedTbAdd(PVMCC pVM, PVMCPUCC pVCpu, PIEMTB pTb)
+{
+    uint32_t const idxHash = IEMTBCACHE_HASH(&g_TbCache, pTb->fFlags, pTb->GCPhysPc);
+    pTb->pNext = g_TbCache.apHash[idxHash];
+    g_TbCache.apHash[idxHash] = pTb;
+    Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x\n", pTb, pTb->GCPhysPc, pTb->cbPC, pTb->fFlags, idxHash));
+    RT_NOREF(pVM, pVCpu);
 }
...
         Assert(pTb->Thrd.cCalls > cCallsPrev);
         Assert(cCallsPrev - pTb->Thrd.cCalls < 5);
-
     }
     else if (pTb->Thrd.cCalls > 0)
-    {
         break;
-    }
     else
     {
...
      * Complete the TB and link it.
      */
+    pTb->fFlags = (pTb->fFlags & ~IEMTB_F_STATE_MASK) | IEMTB_F_STATE_READY;
+    iemThreadedTbAdd(pVM, pVCpu, pTb);
 
 #ifdef IEM_COMPILE_ONLY_MODE
...
  * This is called when the PC doesn't match the current pbInstrBuf.
  */
-static uint64_t iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc, PRTGCPHYS pPhys)
+static RTGCPHYS iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc)
 {
     /** @todo see iemOpcodeFetchBytesJmp */
...
     pVCpu->iem.s.cbInstrBufTotal = 0;
 
-    RT_NOREF(uPc, pPhys);
-    return 0;
+    uint8_t bIgn;
+    iemOpcodeFetchBytesJmp(pVCpu, 1, &bIgn);
+
+    uint64_t off = uPc - pVCpu->iem.s.uInstrBufPc;
+    if (off < pVCpu->iem.s.cbInstrBufTotal)
+    {
+        pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
+        pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
+        if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
+            pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
+        else
+            pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
+
+        return pVCpu->iem.s.GCPhysInstrBuf + off;
+    }
+
+    AssertFailed();
+    RT_NOREF(uPc);
+    return NIL_RTGCPHYS;
 }
 
 
 /** @todo need private inline decl for throw/nothrow matching IEM_WITH_SETJMP? */
-DECL_FORCE_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
+DECL_FORCE_INLINE_THROW(RTGCPHYS) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu)
 {
     /* Set uCurTbStartPc to RIP and calc the effective PC. */
...
             pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
 
-            *pPhys = pVCpu->iem.s.GCPhysInstrBuf + off;
-            return uPc;
-        }
-    }
-    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc, pPhys);
+            return pVCpu->iem.s.GCPhysInstrBuf + off;
+        }
+    }
+    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc);
 }
...
 VMMDECL(VBOXSTRICTRC) IEMExecRecompilerThreaded(PVMCC pVM, PVMCPUCC pVCpu)
 {
+    /*
+     * See if there is an interrupt pending in TRPM, inject it if we can.
+     */
+    if (!TRPMHasTrap(pVCpu))
+    { /* likely */ }
+    else
+    {
+        VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        { /*likely */ }
+        else
+            return rcStrict;
+    }
+
     /*
      * Init the execution environment.
...
     {
         /* Translate PC to physical address, we'll need this for both lookup and compilation. */
-        RTGCPHYS GCPhysPc;
-        uint64_t const uPc = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPc);
+        RTGCPHYS const GCPhysPc    = iemGetPcWithPhysAndCode(pVCpu);
         uint32_t const fExtraFlags = iemGetTbFlagsForCurrentPc(pVCpu);
 
-        pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPc, uPc, fExtraFlags);
+        pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPc, fExtraFlags);
         if (pTb)
             rcStrict = iemThreadedTbExec(pVCpu, pTb);
...
             rcStrict = iemThreadedCompile(pVM, pVCpu, GCPhysPc, fExtraFlags);
         if (rcStrict == VINF_SUCCESS)
-        { /* likely */ }
+        {
+            Assert(pVCpu->iem.s.cActiveMappings == 0);
+
+            uint64_t fCpu = pVCpu->fLocalForcedActions;
+            fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
+                                          | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+                                          | VMCPU_FF_TLB_FLUSH
+                                          | VMCPU_FF_UNHALT );
+            if (!fCpu)
+            { /* likely */ }
+            else
+                return VINF_SUCCESS;
+        }
         else
             return rcStrict;
...
         {
             pVCpu->iem.s.cLongJumps++;
+            if (pVCpu->iem.s.cActiveMappings > 0)
+                iemMemRollback(pVCpu);
             if (pTb)
                 return rcStrict;
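The cache bolted on here is deliberately minimal: a fixed table of 64K buckets indexed by the physical PC XORed with the TB flags and masked to 16 bits, singly linked collision chains, and newest-first insertion. XORing the flags in spreads blocks that share a physical PC but differ in execution mode (bitness, ring, state bits) across buckets instead of piling them into one chain. A freestanding sketch of the scheme, with a simplified, hypothetical TB struct rather than the real IEMTB:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct TB
    {
        struct TB *pNext;       /* collision chain            */
        uint64_t   GCPhysPc;    /* physical PC of first insn  */
        uint32_t   fFlags;      /* mode/state flags           */
    } TB;

    #define TBCACHE_SIZE 0x10000u          /* 64K buckets, mask 0xffff */
    #define TBCACHE_HASH(fFlags, GCPhysPc) \
        ((((uint32_t)(GCPhysPc)) ^ (fFlags)) & (TBCACHE_SIZE - 1))

    static TB *g_apHash[TBCACHE_SIZE];

    static TB *tbLookup(uint64_t GCPhysPc, uint32_t fFlags)
    {
        for (TB *pTb = g_apHash[TBCACHE_HASH(fFlags, GCPhysPc)]; pTb; pTb = pTb->pNext)
            if (pTb->GCPhysPc == GCPhysPc && pTb->fFlags == fFlags)
                return pTb;
        return NULL;
    }

    static void tbAdd(TB *pTb)             /* prepend: newest first */
    {
        uint32_t const idx = TBCACHE_HASH(pTb->fFlags, pTb->GCPhysPc);
        pTb->pNext    = g_apHash[idx];
        g_apHash[idx] = pTb;
    }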
trunk/src/VBox/VMM/VMMR3/EM.cpp (r100145 → r100222)

         pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
     }
+
+#ifdef VBOX_WITH_IEM_RECOMPILER
+    /** @cfgm{/EM/IemRecompiled, bool, true}
+     * Whether IEM bulk execution is recompiled or interpreted. */
+    rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
+    AssertLogRelRCReturn(rc, rc);
+#endif
 
     /*
...
         case VINF_EM_RESCHEDULE_EXEC_ENGINE:
             Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
+            if (!pVM->em.s.fIemExecutesAll)
+            {
 #if !defined(VBOX_VMM_TARGET_ARMV8)
-            if (VM_IS_HM_ENABLED(pVM))
-            {
-                if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+                if (VM_IS_HM_ENABLED(pVM))
                 {
-                    Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
-                    pVCpu->em.s.enmState = EMSTATE_HM;
-                    break;
+                    if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
+                    {
+                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
+                        pVCpu->em.s.enmState = EMSTATE_HM;
+                        break;
+                    }
                 }
-            }
-            else
-#endif
-            if (VM_IS_NEM_ENABLED(pVM))
-            {
-                if (NEMR3CanExecuteGuest(pVM, pVCpu))
+                else
+#endif
+                if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
                 {
                     Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
...
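The new /EM/IemRecompiled key defaults to true, so once VBOX_WITH_IEM_RECOMPILER is built in, bulk IEM execution prefers the threaded recompiler unless configured otherwise. Assuming the usual VBoxInternal/ extradata-to-CFGM mapping applies to this key ("MyVM" is a placeholder VM name), it could presumably be forced back to the interpreter with:

    VBoxManage setextradata "MyVM" VBoxInternal/EM/IemRecompiled 0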
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r98993 → r100222)

         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                         "Data TLB physical revision", "/IEM/CPU%u/DataTlb-PhysRev", idCpu);
+
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbAllocs, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+                        "Translation block allocations", "/IEM/CPU%u/cTbAllocs", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+                        "Translation block frees", "/IEM/CPU%u/cTbFrees", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbLookupHits, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+                        "Translation block lookup hits", "/IEM/CPU%u/cTbLookupHits", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbLookupMisses, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
+                        "Translation block lookup misses", "/IEM/CPU%u/cTbLookupMisses", idCpu);
 
         for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
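Since these land under the regular /IEM/CPU%u/ STAM paths, the TB hit/miss ratio should be inspectable with the existing statistics tooling; for instance, something along these lines (pattern assumed, and note cTbLookupHits is only bumped in VBOX_WITH_STATISTICS builds):

    VBoxManage debugvm "MyVM" statistics --pattern "/IEM/CPU*/cTb*"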
trunk/src/VBox/VMM/include/IEMInternal.h (r100183 → r100222)

     /** Statistics: Number of TB free calls. */
     uint64_t cTbFrees;
+    /** Statistics: Number of TB lookup misses. */
+    uint64_t cTbLookupMisses;
+    /** Statistics: Number of TB lookup hits (debug only). */
+    uint64_t cTbLookupHits;
     /** Whether to end the current TB. */
     bool fEndTb;
...
     bool afRecompilerStuff1[7];
     /** Spaced reserved for recompiler data / alignment. */
-    uint64_t auRecompilerStuff2[3];
+    uint64_t auRecompilerStuff2[1];
     /** @} */
...
 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
+VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
...
  * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
  * @{ */
+IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
+IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
 IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
 IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
trunk/src/VBox/VMM/include/IEMMc.h (r100072 → r100222)

 #define IEM_MC_POP_U64(a_pu64Value) \
     IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
+
+#define IEM_MC_POP_EX_U16(a_pu16Value, a_pNewRsp) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16Ex(pVCpu, (a_pu16Value), (a_pNewRsp)))
+#define IEM_MC_POP_EX_U32(a_pu32Value, a_pNewRsp) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32Ex(pVCpu, (a_pu32Value), (a_pNewRsp)))
+#define IEM_MC_POP_EX_U64(a_pu64Value, a_pNewRsp) \
+    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64Ex(pVCpu, (a_pu64Value), (a_pNewRsp)))