Changeset 47307 in vbox

Timestamp:                   Jul 22, 2013 2:34:36 PM
svn:sync-xref-src-repo-rev:  87447
Location:                    trunk/src/VBox/VMM
Files:                       7 edited
Legend: unchanged context lines have no prefix; removed lines are prefixed with '-', added lines with '+'; '…' marks elided regions.
trunk/src/VBox/VMM/Makefile.kmk  (r46814 → r47307)

 endif
 
-ifneq ($(USERNAME),bird) # Not opts for me atm.
 #
 # Always optimize the interpreter.
…
 VMMAll/IEMAll.cpp_CXXFLAGS += -fomit-frame-pointer # Omitting the frame pointer results in larger code, but it might be worth it. (esp addressing vs ebp?)
 endif
-endif # No optimizations for me atm.
 
 include $(FILE_KBUILD_SUB_FOOTER)
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp  (r47291 → r47307)

 
 /**
+ * Checks if an Intel CPUID feature is present in the host CPU.
+ */
+#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx) \
+    ( (a_fEdx) & pIemCpu->fHostCpuIdStdFeaturesEdx )
+
+/**
  * Evaluates to true if we're presenting an Intel CPU to the guest.
  */
-#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu)   (true)  /** @todo determin this once and store it the CPU structure */
+#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu)   ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_INTEL )
 
 /**
  * Evaluates to true if we're presenting an AMD CPU to the guest.
  */
-#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu)     (false) /** @todo determin this once and store it the CPU structure */
+#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu)     ( (a_pIemCpu)->enmCpuVendor == CPUMCPUVENDOR_AMD )
…
     IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pIemCpu, (bRm), (cbImm), &(a_GCPtrEff)))
 
+#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn)                   (a_pfn)()
 #define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0)               (a_pfn)((a0))
 #define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1)           (a_pfn)((a0), (a1))
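The guest-vendor macros stop hard-coding Intel and instead test the vendor cached in the per-CPU state, resolving the @todo in the removed lines. A stand-alone C++ sketch of that cached-vendor pattern; the enum, struct and macro names below are simplified stand-ins, not the real IEMCPU/CPUMCPUVENDOR definitions:

    #include <cstdio>

    enum CpuVendor { kVendorIntel, kVendorAmd, kVendorOther };

    struct IemCpuState { CpuVendor enmCpuVendor; };   // stand-in for IEMCPU

    // Evaluate the cached vendor instead of a hard-coded constant.
    #define IS_GUEST_CPU_INTEL(a_pState) ((a_pState)->enmCpuVendor == kVendorIntel)
    #define IS_GUEST_CPU_AMD(a_pState)   ((a_pState)->enmCpuVendor == kVendorAmd)

    int main()
    {
        IemCpuState state = { kVendorAmd };           // vendor determined once at init
        std::printf("intel=%d amd=%d\n", IS_GUEST_CPU_INTEL(&state), IS_GUEST_CPU_AMD(&state));
        return 0;
    }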
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm  (r47173 → r47307)

 IEMIMPL_UNARY_OP neg, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
 IEMIMPL_UNARY_OP not, 0, 0
+
+
+;;
+; Macro for implementing memory fence operation.
+;
+; No return value, no operands or anything.
+;
+; @param 1      The instruction.
+;
+%macro IEMIMPL_MEM_FENCE 1
+BEGINCODE
+BEGINPROC_FASTCALL iemAImpl_ %+ %1, 0
+        %1
+        ret
+ENDPROC iemAImpl_ %+ %1
+%endmacro
+
+IEMIMPL_MEM_FENCE lfence
+IEMIMPL_MEM_FENCE sfence
+IEMIMPL_MEM_FENCE mfence
+
+;;
+; Alternative for non-SSE2 host.
+;
+BEGINPROC_FASTCALL iemAImpl_alt_mem_fence, 0
+        push    xAX
+        xchg    xAX, [xSP]
+        add     xSP, xCB
+        ret
+ENDPROC iemAImpl_alt_mem_fence
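The non-SSE2 fallback leans on x86 semantics: xchg with a memory operand carries an implicit LOCK, so swapping a value through the stack serializes all earlier loads and stores, which is full-fence behavior. The same idea in portable C++, as a hedged sketch rather than VirtualBox code:

    #include <atomic>

    // Full memory barrier without SSE2's mfence: an atomic exchange against
    // ordinary memory typically compiles to a LOCKed XCHG on x86, which
    // serializes preceding loads and stores, the property the fallback needs.
    inline void altMemFence()
    {
        std::atomic<long> dummy(0);
        dummy.exchange(1, std::memory_order_seq_cst);
    }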
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h  (r47292 → r47307)

     /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
     if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */
+        return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
     iCrReg |= 8;
…
     /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
     if (!IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(X86_CPUID_AMD_FEATURE_ECX_CR8L))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* #UD takes precedence over #GP(), see test. */
+        return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
     iCrReg |= 8;
…
     IEMOP_HLP_NO_LOCK_PREFIX();
     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        return IEMOP_RAISE_INVALID_OPCODE();
 
     IEM_MC_BEGIN(3, 1);
…
     IEMOP_HLP_NO_LOCK_PREFIX();
     if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_FXSR))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        return IEMOP_RAISE_INVALID_OPCODE();
 
     IEM_MC_BEGIN(3, 1);
…
 FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
 
 
 /** Opcode 0x0f 0xae 11b/5. */
-FNIEMOP_STUB_1(iemOp_Grp15_lfence,  uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_lfence,   uint8_t, bRm)
+{
+    IEMOP_MNEMONIC("lfence");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+        return IEMOP_RAISE_INVALID_OPCODE();
+
+    IEM_MC_BEGIN(0, 0);
+    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
+    else
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+    IEM_MC_ADVANCE_RIP();
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+
 
 /** Opcode 0x0f 0xae 11b/6. */
-FNIEMOP_STUB_1(iemOp_Grp15_mfence,  uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_mfence,   uint8_t, bRm)
+{
+    IEMOP_MNEMONIC("mfence");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+        return IEMOP_RAISE_INVALID_OPCODE();
+
+    IEM_MC_BEGIN(0, 0);
+    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
+    else
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+    IEM_MC_ADVANCE_RIP();
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+
 
 /** Opcode 0x0f 0xae 11b/7. */
-FNIEMOP_STUB_1(iemOp_Grp15_sfence,  uint8_t, bRm);
+FNIEMOP_DEF_1(iemOp_Grp15_sfence,   uint8_t, bRm)
+{
+    IEMOP_MNEMONIC("sfence");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_SSE2))
+        return IEMOP_RAISE_INVALID_OPCODE();
+
+    IEM_MC_BEGIN(0, 0);
+    if (IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(X86_CPUID_FEATURE_EDX_SSE2))
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
+    else
+        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
+    IEM_MC_ADVANCE_RIP();
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+
 
 /** Opcode 0xf3 0x0f 0xae 11b/0. */
…
     IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
+        return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */
 
     switch (pIemCpu->enmEffOpSize)
…
         case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
         case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
-        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        case 6: return IEMOP_RAISE_INVALID_OPCODE();
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
…
         case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
         case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
-        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        case 6: return IEMOP_RAISE_INVALID_OPCODE();
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
…
         case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
         case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
-        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        case 6: return IEMOP_RAISE_INVALID_OPCODE();
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
…
         case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
         case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
-        case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        case 6: return IEMOP_RAISE_INVALID_OPCODE();
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
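All three new fence decoders above share one shape: the guest's CPUID decides whether the opcode exists at all (otherwise #UD via IEMOP_RAISE_INVALID_OPCODE), while the host's CPUID merely selects which worker runs. A compressed C++ sketch of that control flow, with stand-in names for the workers:

    #include <cstdio>

    static void iemFenceReal()     { std::puts("host fence instruction"); } // stand-in for iemAImpl_{l,s,m}fence
    static void iemFenceFallback() { std::puts("locked-xchg fallback"); }   // stand-in for iemAImpl_alt_mem_fence

    // Guest CPUID gates decoding; host CPUID only picks the worker.
    static int decodeFence(bool fGuestSse2, bool fHostSse2)
    {
        if (!fGuestSse2)
            return -1;              // IEMOP_RAISE_INVALID_OPCODE(): #UD for the guest
        if (fHostSse2)
            iemFenceReal();         // host can execute the genuine fence
        else
            iemFenceFallback();     // pre-SSE2 host: LOCKed XCHG gives full-fence semantics
        return 0;                   // VINF_SUCCESS
    }

    int main() { return decodeFence(true, false); }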
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp  (r44529 → r47307)

 #define LOG_GROUP LOG_GROUP_EM
 #include <VBox/vmm/iem.h>
+#include <VBox/vmm/cpum.h>
 #include "IEMInternal.h"
 #include <VBox/vmm/vm.h>
 #include <VBox/err.h>
 
+#include <iprt/asm-amd64-x86.h>
 #include <iprt/assert.h>
 
 
+
+/**
+ * Initializes the interpreted execution manager.
+ *
+ * This must be called after CPUM as we're quering information from CPUM about
+ * the guest and host CPUs.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ */
 VMMR3DECL(int) IEMR3Init(PVM pVM)
 {
…
         pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);
 
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,            STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "Instructions interpreted",                  "/IEM/CPU%u/cInstructions", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,          STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "Potential exists",                          "/IEM/CPU%u/cPotentialExits", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "VERR_IEM_ASPECT_NOT_IMPLEMENTED",           "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,  STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "VERR_IEM_INSTR_NOT_IMPLEMENTED",            "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "Informational statuses returned",           "/IEM/CPU%u/cRetInfStatuses", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "Error statuses returned",                   "/IEM/CPU%u/cRetErrStatuses", idCpu);
-        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, …
+        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                         "Approx bytes written",                      "/IEM/CPU%u/cbWritten", idCpu);
+
+        /*
+         * Host and guest CPU information.
+         */
+        if (idCpu == 0)
+        {
+            uint32_t uIgnored;
+            CPUMGetGuestCpuId(pVCpu, 1, &uIgnored, &uIgnored,
+                              &pVCpu->iem.s.fCpuIdStdFeaturesEcx, &pVCpu->iem.s.fCpuIdStdFeaturesEdx);
+            pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
+
+            ASMCpuId_ECX_EDX(1, &pVCpu->iem.s.fHostCpuIdStdFeaturesEcx, &pVCpu->iem.s.fHostCpuIdStdFeaturesEdx);
+            pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
+        }
+        else
+        {
+            pVCpu->iem.s.fCpuIdStdFeaturesEcx     = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEcx;
+            pVCpu->iem.s.fCpuIdStdFeaturesEdx     = pVM->aCpus[0].iem.s.fCpuIdStdFeaturesEdx;
+            pVCpu->iem.s.enmCpuVendor             = pVM->aCpus[0].iem.s.enmCpuVendor;
+            pVCpu->iem.s.fHostCpuIdStdFeaturesEcx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEcx;
+            pVCpu->iem.s.fHostCpuIdStdFeaturesEdx = pVM->aCpus[0].iem.s.fHostCpuIdStdFeaturesEdx;
+            pVCpu->iem.s.enmHostCpuVendor         = pVM->aCpus[0].iem.s.enmHostCpuVendor;
+        }
     }
     return VINF_SUCCESS;
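The init code snapshots CPUID leaf 1 once per VM: guest values through CPUMGetGuestCpuId, host values through IPRT's ASMCpuId_ECX_EDX, with the other VCPUs copying from VCPU 0. For reference, the host half looks roughly like the following GCC/Clang sketch; the helper name is ours, not IPRT's:

    #include <cpuid.h>     // GCC/Clang builtin wrapper
    #include <cstdint>

    // Roughly what ASMCpuId_ECX_EDX(1, ...) hands back: leaf 1 feature bits.
    static bool hostStdFeatures(uint32_t *pfEcx, uint32_t *pfEdx)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        if (!__get_cpuid(1, &uEax, &uEbx, &uEcx, &uEdx))
            return false;          // CPUID leaf 1 not available
        *pfEcx = uEcx;             // SSE3 and later feature bits
        *pfEdx = uEdx;             // SSE2 is EDX bit 26 (X86_CPUID_FEATURE_EDX_SSE2)
        return true;
    }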
trunk/src/VBox/VMM/include/IEMInternal.h  (r46168 → r47307)

     } aBounceBuffers[3];
 
+    /** @name Target CPU information.
+     * @{ */
+    /** EDX value of CPUID(1).
+     * @remarks Some bits are subject to change and must be queried dynamically. */
+    uint32_t                fCpuIdStdFeaturesEdx;
+    /** ECX value of CPUID(1).
+     * @remarks Some bits are subject to change and must be queried dynamically. */
+    uint32_t                fCpuIdStdFeaturesEcx;
+    /** The CPU vendor. */
+    CPUMCPUVENDOR           enmCpuVendor;
+    /** @} */
+
+    /** @name Host CPU information.
+     * @{ */
+    /** EDX value of CPUID(1). */
+    uint32_t                fHostCpuIdStdFeaturesEdx;
+    /** ECX value of CPUID(1). */
+    uint32_t                fHostCpuIdStdFeaturesEcx;
+    /** The CPU vendor. */
+    CPUMCPUVENDOR           enmHostCpuVendor;
+    /** @} */
+
 #ifdef IEM_VERIFICATION_MODE_FULL
     /** The event verification records for what IEM did (LIFO). */
…
 /** @} */
 
+/** @name Memory ordering
+ * @{ */
+typedef IEM_DECL_IMPL_DEF(void, FNIEMAIMPLMEMFENCE,(void));
+typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
+IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
+IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
+/** @} */
+
 /** @name Double precision shifts
  * @{ */
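The FNIEMAIMPLMEMFENCE typedef gives the four assembly workers a common signature, so a caller could also resolve the worker once through a pointer rather than branching per instruction as the decoders do. A sketch of that alternative, with simplified stand-in typedefs and the IEM_DECL_IMPL_DEF calling-convention decoration omitted; linking requires the IEMAllAImpl.asm objects:

    // Simplified stand-ins for FNIEMAIMPLMEMFENCE / PFNIEMAIMPLMEMFENCE.
    typedef void FNMEMFENCE(void);
    typedef FNMEMFENCE *PFNMEMFENCE;

    extern "C" void iemAImpl_mfence(void);          // provided by IEMAllAImpl.asm
    extern "C" void iemAImpl_alt_mem_fence(void);

    // Resolve the worker once, e.g. at init time, then call it without branching.
    static PFNMEMFENCE selectMfenceWorker(bool fHostSse2)
    {
        return fHostSse2 ? iemAImpl_mfence : iemAImpl_alt_mem_fence;
    }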
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp  (r47284 → r47307)

 #define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx)   (g_fRandom)
 #define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx)          (g_fRandom)
+#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX_ON_HOST(a_fEdx)  (g_fRandom)
 #define IEM_IS_GUEST_CPU_AMD(a_pIemCpu)                         (g_fRandom)
 #define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu)                       (g_fRandom)
…
 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) do {} while (0)
 #define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm)         do { (a_GCPtrEff) = 0; CHK_GCPTR(a_GCPtrEff); } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn)                         do {} while (0)
 #define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0)                     do {} while (0)
 #define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1)                 do {} while (0)
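tstIEMCheckMc compiles the decoder bodies against stub macros: CPUID predicates expand to g_fRandom so neither branch is statically dead, and worker-call macros expand to nothing. A miniature of the same trick, using our own names rather than the testcase's:

    #include <cstdlib>

    // Runtime-random bool keeps both branches of every decoder alive.
    static bool g_fRandom = (std::rand() & 1) != 0;

    #define CHK_FEATURE_PRESENT(a_fBit)  (g_fRandom)
    #define CHK_CALL_VOID_AIMPL_0(a_pfn) do { } while (0)   // worker call compiles away

    static int decodeUnderTest()
    {
        if (!CHK_FEATURE_PRESENT(1u << 26))   // SSE2 bit, as in the fence decoders
            return -1;                        // the #UD path still type-checks
        CHK_CALL_VOID_AIMPL_0(iemAImpl_mfence);
        return 0;
    }

    int main() { return decodeUnderTest(); }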