Changeset 30861 in vbox

- Timestamp: Jul 15, 2010 6:09:29 PM (15 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/CPUM.cpp (r30493 → r30861)

     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
     {
-        /* Only expose the virtual and physical address sizes to the guest. (EAX completely)*/
+        /* Only expose the virtual and physical address sizes to the guest. */
         pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
         pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
…
         return VERR_INTERNAL_ERROR_2;
     }
+
+    /* Notify PGM of the NXE states in case they've changed. */
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+        PGMNotifyNxeChanged(&pVM->aCpus[iCpu], !!(pVM->aCpus[iCpu].cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));
     return VINF_SUCCESS;
 }
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r30263 → r30861)

 #include <VBox/patm.h>
 #include <VBox/dbgf.h>
+#include <VBox/pdm.h>
+#include <VBox/pgm.h>
 #include <VBox/mm.h>
 #include "CPUMInternal.h"
…


-VMMDECL(uint64_t) CPUMGetGuestMsr(PVMCPU pVCpu, unsigned idMsr)
-{
-    uint64_t u64 = 0;
-    uint8_t  u8Multiplier = 4;
-
+/**
+ * Query an MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result
+ * of a RDMSR instruction. We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure (invalid MSR), the caller is
+ *          expected to take the appropriate actions. @a *puValue is set to 0.
+ * @param   pVCpu               The virtual CPU to operate on.
+ * @param   idMsr               The MSR.
+ * @param   puValue             Where to return the value.
+ *
+ * @remarks This will always return the right values, even when we're in the
+ *          recompiler.
+ */
+VMMDECL(int) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
+{
+    /*
+     * If we don't indicate MSR support in the CPUID feature bits, indicate
+     * that a #GP(0) should be raised.
+     */
+    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
+    {
+        *puValue = 0;
+        return VERR_CPUM_RAISE_GP_0;
+    }
+
+    int rc = VINF_SUCCESS;
+    uint8_t const u8Multiplier = 4;
     switch (idMsr)
     {
         case MSR_IA32_TSC:
-            u64 = TMCpuTickGet(pVCpu);
+            *puValue = TMCpuTickGet(pVCpu);
+            break;
+
+        case MSR_IA32_APICBASE:
+            rc = PDMApicGetBase(pVCpu->CTX_SUFF(pVM), puValue);
+            if (RT_SUCCESS(rc))
+                rc = VINF_SUCCESS;
+            else
+            {
+                *puValue = 0;
+                rc = VERR_CPUM_RAISE_GP_0;
+            }
             break;

         case MSR_IA32_CR_PAT:
-            u64 = pVCpu->cpum.s.Guest.msrPAT;
+            *puValue = pVCpu->cpum.s.Guest.msrPAT;
             break;

         case MSR_IA32_SYSENTER_CS:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.cs;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.cs;
             break;

         case MSR_IA32_SYSENTER_EIP:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.eip;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.eip;
             break;

         case MSR_IA32_SYSENTER_ESP:
-            u64 = pVCpu->cpum.s.Guest.SysEnter.esp;
+            *puValue = pVCpu->cpum.s.Guest.SysEnter.esp;
             break;

         case MSR_K6_EFER:
-            u64 = pVCpu->cpum.s.Guest.msrEFER;
+            *puValue = pVCpu->cpum.s.Guest.msrEFER;
             break;

         case MSR_K8_SF_MASK:
-            u64 = pVCpu->cpum.s.Guest.msrSFMASK;
+            *puValue = pVCpu->cpum.s.Guest.msrSFMASK;
             break;

         case MSR_K6_STAR:
-            u64 = pVCpu->cpum.s.Guest.msrSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrSTAR;
             break;

         case MSR_K8_LSTAR:
-            u64 = pVCpu->cpum.s.Guest.msrLSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrLSTAR;
             break;

         case MSR_K8_CSTAR:
-            u64 = pVCpu->cpum.s.Guest.msrCSTAR;
+            *puValue = pVCpu->cpum.s.Guest.msrCSTAR;
+            break;
+
+        case MSR_K8_FS_BASE:
+            *puValue = pVCpu->cpum.s.Guest.fsHid.u64Base;
+            break;
+
+        case MSR_K8_GS_BASE:
+            *puValue = pVCpu->cpum.s.Guest.gsHid.u64Base;
             break;

         case MSR_K8_KERNEL_GS_BASE:
-            u64 = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
+            *puValue = pVCpu->cpum.s.Guest.msrKERNELGSBASE;
             break;

         case MSR_K8_TSC_AUX:
-            u64 = pVCpu->cpum.s.GuestMsr.msr.tscAux;
+            *puValue = pVCpu->cpum.s.GuestMsr.msr.tscAux;
             break;

         case MSR_IA32_PERF_STATUS:
-            /** @todo: could really be not exactly correct, maybe use host's values */
-            /* Keep consistent with helper_rdmsr() in REM */
-            u64 = (1000ULL /* TSC increment by tick */)
-                | ((uint64_t)u8Multiplier << 24 /* CPU multiplier (aka bus ratio) min */ )
-                | ((uint64_t)u8Multiplier << 40 /* CPU multiplier (aka bus ratio) max */ );
-            break;
-
-        case MSR_IA32_FSB_CLOCK_STS:
-            /**
+            /** @todo could really be not exactly correct, maybe use host's values */
+            *puValue = UINT64_C(1000)                 /* TSC increment by tick */
+                     | ((uint64_t)u8Multiplier << 24) /* CPU multiplier (aka bus ratio) min */
+                     | ((uint64_t)u8Multiplier << 40) /* CPU multiplier (aka bus ratio) max */;
+            break;
+
+        case MSR_IA32_FSB_CLOCK_STS:
+            /*
              * Encoded as:
              * 0 - 266
…
              * 5 - return 100
              */
-            u64 = (2 << 4);
+            *puValue = (2 << 4);
             break;

         case MSR_IA32_PLATFORM_INFO:
-            u64 = ((u8Multiplier)<<8 /* Flex ratio max */)
-                | ((uint64_t)u8Multiplier << 40 /* Flex ratio min */ );
+            *puValue = (u8Multiplier << 8)            /* Flex ratio max */
+                     | ((uint64_t)u8Multiplier << 40) /* Flex ratio min */;
             break;

         case MSR_IA32_THERM_STATUS:
             /* CPU temperature reltive to TCC, to actually activate, CPUID leaf 6 EAX[0] must be set */
-            u64 = (1 << 31) /* validity bit */ |
-                  (20 << 16) /* degrees till TCC */;
+            *puValue = ( 1 << 31) /* validity bit */
+                     | (20 << 16) /* degrees till TCC */;
             break;
…
 #if 0
             /* Needs to be tested more before enabling. */
-            u64 = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
+            *puValue = pVCpu->cpum.s.GuestMsr.msr.miscEnable;
 #else
-            u64 = 0;
+            *puValue = 0;
 #endif
             break;

-        /* fs & gs base skipped on purpose as the current context might not be up-to-date. */
+#if 0 /*def IN_RING0 */
+        case MSR_IA32_PLATFORM_ID:
+        case MSR_IA32_BIOS_SIGN_ID:
+            if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
+            {
+                /* Available since the P6 family. VT-x implies that this feature is present. */
+                if (idMsr == MSR_IA32_PLATFORM_ID)
+                    *puValue = ASMRdMsr(MSR_IA32_PLATFORM_ID);
+                else if (idMsr == MSR_IA32_BIOS_SIGN_ID)
+                    *puValue = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
+                break;
+            }
+            /* no break */
+#endif
+
         default:
-            AssertFailed();
+            /* In X2APIC specification this range is reserved for APIC control. */
+            if (    idMsr >= MSR_IA32_APIC_START
+                &&  idMsr <  MSR_IA32_APIC_END)
+            {
+                rc = PDMApicReadMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, puValue);
+                if (RT_SUCCESS(rc))
+                    rc = VINF_SUCCESS;
+                else
+                {
+                    *puValue = 0;
+                    rc = VERR_CPUM_RAISE_GP_0;
+                }
+            }
+            else
+            {
+                *puValue = 0;
+                rc = VERR_CPUM_RAISE_GP_0;
+            }
             break;
     }
-    return u64;
-}
-
-VMMDECL(void) CPUMSetGuestMsr(PVMCPU pVCpu, unsigned idMsr, uint64_t valMsr)
-{
-    /* On purpose only a limited number of MSRs; use the emulation function to update the others. */
+
+    return rc;
+}
+
+
+/**
+ * Sets the MSR.
+ *
+ * The caller is responsible for checking privilege if the call is the result
+ * of a WRMSR instruction. We'll do the rest.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_CPUM_RAISE_GP_0 on failure, the caller is expected to take the
+ *          appropriate actions.
+ *
+ * @param   pVCpu               The virtual CPU to operate on.
+ * @param   idMsr               The MSR id.
+ * @param   uValue              The value to set.
+ *
+ * @remarks Everyone changing MSR values, including the recompiler, shall do it
+ *          by calling this method. This makes sure we have current values and
+ *          that we trigger all the right actions when something changes.
+ */
+VMMDECL(int) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue)
+{
+    /*
+     * If we don't indicate MSR support in the CPUID feature bits, indicate
+     * that a #GP(0) should be raised.
+     */
+    if (!(pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_MSR))
+        return VERR_CPUM_RAISE_GP_0;
+
+    int rc = VINF_SUCCESS;
     switch (idMsr)
     {
+        case MSR_IA32_MISC_ENABLE:
+            pVCpu->cpum.s.GuestMsr.msr.miscEnable = uValue;
+            break;
+
+        case MSR_IA32_TSC:
+            TMCpuTickSet(pVCpu->CTX_SUFF(pVM), pVCpu, uValue);
+            break;
+
+        case MSR_IA32_APICBASE:
+            rc = PDMApicSetBase(pVCpu->CTX_SUFF(pVM), uValue);
+            if (rc != VINF_SUCCESS)
+                rc = VERR_CPUM_RAISE_GP_0;
+            break;
+
+        case MSR_IA32_CR_PAT:
+            pVCpu->cpum.s.Guest.msrPAT = uValue;
+            break;
+
+        case MSR_IA32_SYSENTER_CS:
+            pVCpu->cpum.s.Guest.SysEnter.cs = uValue & 0xffff; /* 16 bits selector */
+            break;
+
+        case MSR_IA32_SYSENTER_EIP:
+            pVCpu->cpum.s.Guest.SysEnter.eip = uValue;
+            break;
+
+        case MSR_IA32_SYSENTER_ESP:
+            pVCpu->cpum.s.Guest.SysEnter.esp = uValue;
+            break;
+
+        case MSR_K6_EFER:
+        {
+            PVM             pVM          = pVCpu->CTX_SUFF(pVM);
+            uint64_t const  uOldEFER     = pVCpu->cpum.s.Guest.msrEFER;
+            uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
+                                         ? pVM->cpum.s.aGuestCpuIdExt[1].edx
+                                         : 0;
+            uint64_t        fMask        = 0;
+
+            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)
+                fMask |= MSR_K6_EFER_NXE;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
+                fMask |= MSR_K6_EFER_LME;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_SEP)
+                fMask |= MSR_K6_EFER_SCE;
+            if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
+                fMask |= MSR_K6_EFER_FFXSR;
+
+            /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if
+               paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+            if (    (uOldEFER & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME)
+                &&  (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG))
+            {
+                Log(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
+                return VERR_CPUM_RAISE_GP_0;
+            }
+
+            /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
+            AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
+                      ("Unexpected value %RX64\n", uValue));
+            pVCpu->cpum.s.Guest.msrEFER = (uOldEFER & ~fMask) | (uValue & fMask);
+
+            /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB
+               if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
+            if (    (uValue & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA))
+                !=  (pVCpu->cpum.s.Guest.msrEFER & (MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA)))
+            {
+                /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
+                HWACCMFlushTLB(pVCpu);
+
+                /* Notify PGM about NXE changes. */
+                if (    (uValue & MSR_K6_EFER_NXE)
+                    !=  (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE))
+                    PGMNotifyNxeChanged(pVCpu, !!(uValue & MSR_K6_EFER_NXE));
+            }
+            break;
+        }
+
+        case MSR_K8_SF_MASK:
+            pVCpu->cpum.s.Guest.msrSFMASK = uValue;
+            break;
+
+        case MSR_K6_STAR:
+            pVCpu->cpum.s.Guest.msrSTAR = uValue;
+            break;
+
+        case MSR_K8_LSTAR:
+            pVCpu->cpum.s.Guest.msrLSTAR = uValue;
+            break;
+
+        case MSR_K8_CSTAR:
+            pVCpu->cpum.s.Guest.msrCSTAR = uValue;
+            break;
+
+        case MSR_K8_FS_BASE:
+            pVCpu->cpum.s.Guest.fsHid.u64Base = uValue;
+            break;
+
+        case MSR_K8_GS_BASE:
+            pVCpu->cpum.s.Guest.gsHid.u64Base = uValue;
+            break;
+
+        case MSR_K8_KERNEL_GS_BASE:
+            pVCpu->cpum.s.Guest.msrKERNELGSBASE = uValue;
+            break;
+
         case MSR_K8_TSC_AUX:
-            pVCpu->cpum.s.GuestMsr.msr.tscAux = valMsr;
-            break;
-
-        case MSR_IA32_MISC_ENABLE:
-            pVCpu->cpum.s.GuestMsr.msr.miscEnable = valMsr;
+            pVCpu->cpum.s.GuestMsr.msr.tscAux = uValue;
             break;

         default:
-            AssertFailed();
+            /* In X2APIC specification this range is reserved for APIC control. */
+            if (    idMsr >= MSR_IA32_APIC_START
+                &&  idMsr <  MSR_IA32_APIC_END)
+            {
+                rc = PDMApicWriteMSR(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu, idMsr, uValue);
+                if (rc != VINF_SUCCESS)
+                    rc = VERR_CPUM_RAISE_GP_0;
+            }
+            else
+            {
+                /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
+                /** @todo rc = VERR_CPUM_RAISE_GP_0 */
+                Log(("CPUMSetGuestMsr: Unknown MSR %#x attempted set to %#llx\n", idMsr, uValue));
+            }
             break;
     }
-}
+    return rc;
+}
+

 VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
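The practical effect of the new accessors is that MSR emulation now reports failure through a status code instead of asserting. A minimal consumer sketch, mirroring how the reworked EMInterpretRdmsr in EMAll.cpp (next file) uses the API; `pVCpu` and `pRegFrame` are assumed to be in scope, and the CPL-0 privilege check remains the caller's job:

    /* RDMSR-style read: ECX selects the MSR, EDX:EAX receives the value. */
    uint64_t uValue;
    int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
    if (rc == VINF_SUCCESS)
    {
        pRegFrame->rax = (uint32_t)uValue;         /* low dword */
        pRegFrame->rdx = (uint32_t)(uValue >> 32); /* high dword */
    }
    else
    {
        Assert(rc == VERR_CPUM_RAISE_GP_0);
        /* The caller decides how to raise #GP(0) in the guest, or falls
           back to another emulation path as EMInterpretRdmsr does. */
    }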
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r30567 → r30861)

     pCtx->rdx = (uTicks >> 32ULL);
     /* Low dword of the TSC_AUX msr only. */
-    pCtx->rcx = (uint32_t)CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
+    CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pCtx->rcx);
+    pCtx->rcx &= UINT32_C(0xffffffff);

     return VINF_SUCCESS;
…
  * @param   pVCpu       The VMCPU handle.
  * @param   pRegFrame   The register frame.
- *
  */
 VMMDECL(int) EMInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
 {
-    uint32_t u32Dummy, u32Features, cpl;
-    uint64_t val;
-    CPUMCTX *pCtx;
-    int      rc = VINF_SUCCESS;
-
     /** @todo According to the Intel manuals, there's a REX version of RDMSR that is slightly different.
      *  That version clears the high dwords of both RDX & RAX */
-    pCtx = CPUMQueryGuestCtxPtr(pVCpu);

     /* Get the current privilege level. */
-    cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
-    if (cpl != 0)
+    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
         return VERR_EM_INTERPRETER; /* supervisor only */

-    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-    if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
-        return VERR_EM_INTERPRETER; /* not supported */
-
-    switch (pRegFrame->ecx)
-    {
-        case MSR_IA32_TSC:
-            val = TMCpuTickGet(pVCpu);
-            break;
-
-        case MSR_IA32_APICBASE:
-            rc = PDMApicGetBase(pVM, &val);
-            AssertRC(rc);
-            break;
-
-        case MSR_IA32_CR_PAT:
-            val = pCtx->msrPAT;
-            break;
-
-        case MSR_IA32_SYSENTER_CS:
-            val = pCtx->SysEnter.cs;
-            break;
-
-        case MSR_IA32_SYSENTER_EIP:
-            val = pCtx->SysEnter.eip;
-            break;
-
-        case MSR_IA32_SYSENTER_ESP:
-            val = pCtx->SysEnter.esp;
-            break;
-
-        case MSR_K6_EFER:
-            val = pCtx->msrEFER;
-            break;
-
-        case MSR_K8_SF_MASK:
-            val = pCtx->msrSFMASK;
-            break;
-
-        case MSR_K6_STAR:
-            val = pCtx->msrSTAR;
-            break;
-
-        case MSR_K8_LSTAR:
-            val = pCtx->msrLSTAR;
-            break;
-
-        case MSR_K8_CSTAR:
-            val = pCtx->msrCSTAR;
-            break;
-
-        case MSR_K8_FS_BASE:
-            val = pCtx->fsHid.u64Base;
-            break;
-
-        case MSR_K8_GS_BASE:
-            val = pCtx->gsHid.u64Base;
-            break;
-
-        case MSR_K8_KERNEL_GS_BASE:
-            val = pCtx->msrKERNELGSBASE;
-            break;
-
-        case MSR_K8_TSC_AUX:
-            val = CPUMGetGuestMsr(pVCpu, MSR_K8_TSC_AUX);
-            break;
-
-        case MSR_IA32_PERF_STATUS:
-        case MSR_IA32_PLATFORM_INFO:
-        case MSR_IA32_MISC_ENABLE:
-        case MSR_IA32_FSB_CLOCK_STS:
-        case MSR_IA32_THERM_STATUS:
-            val = CPUMGetGuestMsr(pVCpu, pRegFrame->ecx);
-            break;
-
-#if 0 /*def IN_RING0 */
-        case MSR_IA32_PLATFORM_ID:
-        case MSR_IA32_BIOS_SIGN_ID:
-            if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
-            {
-                /* Available since the P6 family. VT-x implies that this feature is present. */
-                if (pRegFrame->ecx == MSR_IA32_PLATFORM_ID)
-                    val = ASMRdMsr(MSR_IA32_PLATFORM_ID);
-                else if (pRegFrame->ecx == MSR_IA32_BIOS_SIGN_ID)
-                    val = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
-                break;
-            }
-            /* no break */
-#endif
-        default:
-            /* In X2APIC specification this range is reserved for APIC control. */
-            if (    pRegFrame->ecx >= MSR_IA32_APIC_START
-                &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
-                rc = PDMApicReadMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, &val);
-            else
-                /* We should actually trigger a #GP here, but don't as that will cause more trouble. */
-                val = 0;
-            break;
-    }
-    LogFlow(("EMInterpretRdmsr %s (%x) -> val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
-    if (rc == VINF_SUCCESS)
-    {
-        pRegFrame->rax = (uint32_t) val;
-        pRegFrame->rdx = (uint32_t)(val >> 32);
-    }
+    uint64_t uValue;
+    int rc = CPUMQueryGuestMsr(pVCpu, pRegFrame->ecx, &uValue);
+    if (RT_UNLIKELY(rc != VINF_SUCCESS))
+    {
+        Assert(rc == VERR_CPUM_RAISE_GP_0);
+        return VERR_EM_INTERPRETER;
+    }
+    pRegFrame->rax = (uint32_t) uValue;
+    pRegFrame->rdx = (uint32_t)(uValue >> 32);
+    LogFlow(("EMInterpretRdmsr %s (%x) -> %RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, uValue));
     return rc;
 }
…
 static int emInterpretRdmsr(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
 {
-    /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
+    /* Note: The Intel manual claims there's a REX version of RDMSR that's slightly
+       different, so we play safe by completely disassembling the instruction. */
     Assert(!(pDis->prefix & PREFIX_REX));
     return EMInterpretRdmsr(pVM, pVCpu, pRegFrame);
…
 VMMDECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
 {
-    uint32_t u32Dummy, u32Features, cpl;
-    uint64_t val;
-    CPUMCTX *pCtx;
-
-    /* Note: works the same in 32 and 64 bits modes. */
-    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
-    /* Get the current privilege level. */
-    cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
-    if (cpl != 0)
-        return VERR_EM_INTERPRETER; /* supervisor only */
-
-    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-    if (!(u32Features & X86_CPUID_FEATURE_EDX_MSR))
-        return VERR_EM_INTERPRETER; /* not supported */
-
-    val = RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx);
-    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx, val));
-    switch (pRegFrame->ecx)
-    {
-        case MSR_IA32_TSC:
-            TMCpuTickSet(pVM, pVCpu, val);
-            break;
-
-        case MSR_IA32_APICBASE:
-        {
-            int rc = PDMApicSetBase(pVM, val);
-            AssertRC(rc);
-            break;
-        }
-
-        case MSR_IA32_CR_PAT:
-            pCtx->msrPAT = val;
-            break;
-
-        case MSR_IA32_SYSENTER_CS:
-            pCtx->SysEnter.cs = val & 0xffff; /* 16 bits selector */
-            break;
-
-        case MSR_IA32_SYSENTER_EIP:
-            pCtx->SysEnter.eip = val;
-            break;
-
-        case MSR_IA32_SYSENTER_ESP:
-            pCtx->SysEnter.esp = val;
-            break;
-
-        case MSR_K6_EFER:
-        {
-            uint64_t uMask = 0;
-            uint64_t oldval = pCtx->msrEFER;
-
-            /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */
-            CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
-            if (u32Features & X86_CPUID_AMD_FEATURE_EDX_NX)
-                uMask |= MSR_K6_EFER_NXE;
-            if (u32Features & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
-                uMask |= MSR_K6_EFER_LME;
-            if (u32Features & X86_CPUID_AMD_FEATURE_EDX_SEP)
-                uMask |= MSR_K6_EFER_SCE;
-            if (u32Features & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
-                uMask |= MSR_K6_EFER_FFXSR;
-
-            /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
-            if (    ((pCtx->msrEFER & MSR_K6_EFER_LME) != (val & uMask & MSR_K6_EFER_LME))
-                &&  (pCtx->cr0 & X86_CR0_PG))
-            {
-                AssertMsgFailed(("Illegal MSR_K6_EFER_LME change: paging is enabled!!\n"));
-                return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
-            }
-
-            /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
-            AssertMsg(!(val & ~(MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA /* ignored anyway */ |MSR_K6_EFER_SCE|MSR_K6_EFER_FFXSR)), ("Unexpected value %RX64\n", val));
-            pCtx->msrEFER = (pCtx->msrEFER & ~uMask) | (val & uMask);
-
-            /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB if MSR_K6_EFER_NXE, MSR_K6_EFER_LME or MSR_K6_EFER_LMA are changed. */
-            if ((oldval & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)) != (pCtx->msrEFER & (MSR_K6_EFER_NXE|MSR_K6_EFER_LME|MSR_K6_EFER_LMA)))
-                HWACCMFlushTLB(pVCpu);
-
-            break;
-        }
-
-        case MSR_K8_SF_MASK:
-            pCtx->msrSFMASK = val;
-            break;
-
-        case MSR_K6_STAR:
-            pCtx->msrSTAR = val;
-            break;
-
-        case MSR_K8_LSTAR:
-            pCtx->msrLSTAR = val;
-            break;
-
-        case MSR_K8_CSTAR:
-            pCtx->msrCSTAR = val;
-            break;
-
-        case MSR_K8_FS_BASE:
-            pCtx->fsHid.u64Base = val;
-            break;
-
-        case MSR_K8_GS_BASE:
-            pCtx->gsHid.u64Base = val;
-            break;
-
-        case MSR_K8_KERNEL_GS_BASE:
-            pCtx->msrKERNELGSBASE = val;
-            break;
-
-        case MSR_K8_TSC_AUX:
-        case MSR_IA32_MISC_ENABLE:
-            CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, val);
-            break;
-
-        default:
-            /* In X2APIC specification this range is reserved for APIC control. */
-            if (    pRegFrame->ecx >= MSR_IA32_APIC_START
-                &&  pRegFrame->ecx <  MSR_IA32_APIC_END)
-                return PDMApicWriteMSR(pVM, pVCpu->idCpu, pRegFrame->ecx, val);
-
-            /* We should actually trigger a #GP here, but don't as that might cause more trouble. */
-            break;
-    }
-    return VINF_SUCCESS;
+    /* Check the current privilege level, this instruction is supervisor only. */
+    if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
+        return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
+
+    int rc = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
+    if (rc != VINF_SUCCESS)
+    {
+        Assert(rc == VERR_CPUM_RAISE_GP_0);
+        return VERR_EM_INTERPRETER;
+    }
+    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
+             RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
+    return rc;
 }
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r30831 → r30861)

     if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
     {
-        /* AMD-V nested paging or real/protected mode without paging */
+        /* AMD-V nested paging or real/protected mode without paging. */
         GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
         enmKind = PGMPOOLKIND_PAE_PD_PHYS;
…

 # if defined(IN_RC)
-    /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
+    /*
+     * In 32 bits PAE mode we *must* invalidate the TLB when changing a
+     * PDPT entry; the CPU fetches them only during cr3 load, so any
      * non-present PDPT will continue to cause page faults.
      */
…
         default: return "unknown mode value";
     }
+}
+
+
+
+/**
+ * Notification from CPUM that the EFER.NXE bit has changed.
+ *
+ * @param   pVCpu       The virtual CPU for which EFER changed.
+ * @param   fNxe        The new NXE state.
+ */
+VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
+{
+    /* later */
 }