- Timestamp: Oct 16, 2008 4:36:27 PM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r13267 → r13351)

```diff
 /**
- * Interpret LMSW
+ * Interpret CLTS
  *
  * @returns VBox status code.
  * @param   pVM         The VM handle.
+ *
+ */
+VMMDECL(int) EMInterpretCLTS(PVM pVM)
+{
+    uint64_t cr0 = CPUMGetGuestCR0(pVM);
+    if (!(cr0 & X86_CR0_TS))
+        return VINF_SUCCESS;
+    return CPUMSetGuestCR0(pVM, cr0 & ~X86_CR0_TS);
+}
+
+/**
+ * CLTS Emulation.
+ */
+static int emInterpretClts(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
+{
+    return EMInterpretCLTS(pVM);
+}
+
+
+/**
+ * Update CRx
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pRegFrame   The register frame.
+ * @param   DestRegCRx  CRx register index (USE_REG_CR*)
+ * @param   val         New CRx value
+ *
+ */
+static int EMUpdateCRx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint64_t val)
+{
+    uint64_t oldval;
+    uint64_t msrEFER;
+    int      rc;
+
+    /** @todo Clean up this mess. */
+    LogFlow(("EMInterpretCRxWrite at %VGv CR%d <- %VX64\n", pRegFrame->rip, DestRegCrx, val));
+    switch (DestRegCrx)
+    {
+    case USE_REG_CR0:
+        oldval = CPUMGetGuestCR0(pVM);
+#ifdef IN_GC
+        /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
+        if (    (val    & (X86_CR0_WP | X86_CR0_AM))
+            !=  (oldval & (X86_CR0_WP | X86_CR0_AM)))
+            return VERR_EM_INTERPRETER;
+#endif
+        CPUMSetGuestCR0(pVM, val);
+        val = CPUMGetGuestCR0(pVM);
+        if (    (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
+            !=  (val    & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
+        {
+            /* global flush */
+            rc = PGMFlushTLB(pVM, CPUMGetGuestCR3(pVM), true /* global */);
+            AssertRCReturn(rc, rc);
+        }
+
+        /* Deal with long mode enabling/disabling. */
+        msrEFER = CPUMGetGuestEFER(pVM);
+        if (msrEFER & MSR_K6_EFER_LME)
+        {
+            if (    !(oldval & X86_CR0_PG)
+                &&   (val & X86_CR0_PG))
+            {
+                /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+                if (pRegFrame->csHid.Attr.n.u1Long)
+                {
+                    AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
+                    return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
+                }
+
+                /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+                if (!(CPUMGetGuestCR4(pVM) & X86_CR4_PAE))
+                {
+                    AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
+                    return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
+                }
+                msrEFER |= MSR_K6_EFER_LMA;
+            }
+            else
+            if (    (oldval & X86_CR0_PG)
+                &&  !(val & X86_CR0_PG))
+            {
+                msrEFER &= ~MSR_K6_EFER_LMA;
+                /* @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
+            }
+            CPUMSetGuestEFER(pVM, msrEFER);
+        }
+        return PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), CPUMGetGuestEFER(pVM));
+
+    case USE_REG_CR2:
+        rc = CPUMSetGuestCR2(pVM, val); AssertRC(rc);
+        return VINF_SUCCESS;
+
+    case USE_REG_CR3:
+        /* Reloading the current CR3 means the guest just wants to flush the TLBs */
+        rc = CPUMSetGuestCR3(pVM, val); AssertRC(rc);
+        if (CPUMGetGuestCR0(pVM) & X86_CR0_PG)
+        {
+            /* flush */
+            rc = PGMFlushTLB(pVM, val, !(CPUMGetGuestCR4(pVM) & X86_CR4_PGE));
+            AssertRCReturn(rc, rc);
+        }
+        return VINF_SUCCESS;
+
+    case USE_REG_CR4:
+        oldval = CPUMGetGuestCR4(pVM);
+        rc = CPUMSetGuestCR4(pVM, val); AssertRC(rc);
+        val = CPUMGetGuestCR4(pVM);
+
+        msrEFER = CPUMGetGuestEFER(pVM);
+        /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
+        if (    (msrEFER & MSR_K6_EFER_LMA)
+            &&  (oldval & X86_CR4_PAE)
+            &&  !(val & X86_CR4_PAE))
+        {
+            return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
+        }
+
+        if (    (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
+            !=  (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
+        {
+            /* global flush */
+            rc = PGMFlushTLB(pVM, CPUMGetGuestCR3(pVM), true /* global */);
+            AssertRCReturn(rc, rc);
+        }
+# ifdef IN_GC
+        /* Feeling extremely lazy. */
+        if (    (oldval & (X86_CR4_OSFSXR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
+            !=  (val & (X86_CR4_OSFSXR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
+        {
+            Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
+            VM_FF_SET(pVM, VM_FF_TO_R3);
+        }
+# endif
+        return PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), CPUMGetGuestEFER(pVM));
+
+    case USE_REG_CR8:
+        return PDMApicSetTPR(pVM, val);
+
+    default:
+        AssertFailed();
+    case USE_REG_CR1: /* illegal op */
+        break;
+    }
+    return VERR_EM_INTERPRETER;
+}
+
+/**
+ * Interpret CRx write
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pRegFrame   The register frame.
+ * @param   DestRegCRx  CRx register index (USE_REG_CR*)
+ * @param   SrcRegGen   General purpose register index (USE_REG_E**))
+ *
+ */
+VMMDECL(int) EMInterpretCRxWrite(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
+{
+    uint64_t val;
+    int      rc;
+
+    if (CPUMIsGuestIn64BitCode(pVM, pRegFrame))
+    {
+        rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
+    }
+    else
+    {
+        uint32_t val32;
+        rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
+        val = val32;
+    }
+
+    if (VBOX_SUCCESS(rc))
+        return EMUpdateCRx(pVM, pRegFrame, DestRegCrx, val);
+
+    return VERR_EM_INTERPRETER;
+}
+
+/**
+ * Interpret LMSW
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pRegFrame   The register frame.
  * @param   u16Data     LMSW source data.
  *
  */
-VMMDECL(int) EMInterpretLMSW(PVM pVM, uint16_t u16Data)
+VMMDECL(int) EMInterpretLMSW(PVM pVM, PCPUMCTXCORE pRegFrame, uint16_t u16Data)
 {
     uint64_t OldCr0 = CPUMGetGuestCR0(pVM);
…
                     | (u16Data & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS));
 
-    /* don't use this path to go into protected mode! */
-    if ((OldCr0 & X86_CR0_PE) != (NewCr0 & X86_CR0_PE))
-        return VERR_EM_INTERPRETER;
-
-    return CPUMSetGuestCR0(pVM, NewCr0);
+    return EMUpdateCRx(pVM, pRegFrame, USE_REG_CR0, NewCr0);
 }
 
…
 
     LogFlow(("emInterpretLmsw %x\n", val));
-    return EMInterpretLMSW(pVM, val);
-}
-
-
-/**
- * Interpret CLTS
- *
- * @returns VBox status code.
- * @param   pVM         The VM handle.
- *
- */
-VMMDECL(int) EMInterpretCLTS(PVM pVM)
-{
-    uint64_t cr0 = CPUMGetGuestCR0(pVM);
-    if (!(cr0 & X86_CR0_TS))
-        return VINF_SUCCESS;
-    return CPUMSetGuestCR0(pVM, cr0 & ~X86_CR0_TS);
-}
-
-/**
- * CLTS Emulation.
- */
-static int emInterpretClts(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbSize)
-{
-    return EMInterpretCLTS(pVM);
-}
-
-
-/**
- * Interpret CRx write
- *
- * @returns VBox status code.
- * @param   pVM         The VM handle.
- * @param   pRegFrame   The register frame.
- * @param   DestRegCRx  CRx register index (USE_REG_CR*)
- * @param   SrcRegGen   General purpose register index (USE_REG_E**))
- *
- */
-VMMDECL(int) EMInterpretCRxWrite(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t DestRegCrx, uint32_t SrcRegGen)
-{
-    uint64_t val;
-    uint64_t oldval;
-    uint64_t msrEFER;
-    int      rc;
-
-    /** @todo Clean up this mess. */
-    if (CPUMIsGuestIn64BitCode(pVM, pRegFrame))
-    {
-        rc = DISFetchReg64(pRegFrame, SrcRegGen, &val);
-    }
-    else
-    {
-        uint32_t val32;
-        rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
-        val = val32;
-    }
-
-    if (VBOX_SUCCESS(rc))
-    {
-        LogFlow(("EMInterpretCRxWrite at %VGv CR%d <- %VX64\n", pRegFrame->rip, DestRegCrx, val));
-        switch (DestRegCrx)
-        {
-        case USE_REG_CR0:
-            oldval = CPUMGetGuestCR0(pVM);
-#ifdef IN_GC
-            /* CR0.WP and CR0.AM changes require a reschedule run in ring 3. */
-            if (    (val    & (X86_CR0_WP | X86_CR0_AM))
-                !=  (oldval & (X86_CR0_WP | X86_CR0_AM)))
-                return VERR_EM_INTERPRETER;
-#endif
-            CPUMSetGuestCR0(pVM, val);
-            val = CPUMGetGuestCR0(pVM);
-            if (    (oldval & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
-                !=  (val    & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)))
-            {
-                /* global flush */
-                rc = PGMFlushTLB(pVM, CPUMGetGuestCR3(pVM), true /* global */);
-                AssertRCReturn(rc, rc);
-            }
-
-            /* Deal with long mode enabling/disabling. */
-            msrEFER = CPUMGetGuestEFER(pVM);
-            if (msrEFER & MSR_K6_EFER_LME)
-            {
-                if (    !(oldval & X86_CR0_PG)
-                    &&   (val & X86_CR0_PG))
-                {
-                    /* Illegal to have an active 64 bits CS selector (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
-                    if (pRegFrame->csHid.Attr.n.u1Long)
-                    {
-                        AssertMsgFailed(("Illegal enabling of paging with CS.u1Long = 1!!\n"));
-                        return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
-                    }
-
-                    /* Illegal to switch to long mode before activating PAE first (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
-                    if (!(CPUMGetGuestCR4(pVM) & X86_CR4_PAE))
-                    {
-                        AssertMsgFailed(("Illegal enabling of paging with PAE disabled!!\n"));
-                        return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
-                    }
-                    msrEFER |= MSR_K6_EFER_LMA;
-                }
-                else
-                if (    (oldval & X86_CR0_PG)
-                    &&  !(val & X86_CR0_PG))
-                {
-                    msrEFER &= ~MSR_K6_EFER_LMA;
-                    /* @todo Do we need to cut off rip here? High dword of rip is undefined, so it shouldn't really matter. */
-                }
-                CPUMSetGuestEFER(pVM, msrEFER);
-            }
-            return PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), CPUMGetGuestEFER(pVM));
-
-        case USE_REG_CR2:
-            rc = CPUMSetGuestCR2(pVM, val); AssertRC(rc);
-            return VINF_SUCCESS;
-
-        case USE_REG_CR3:
-            /* Reloading the current CR3 means the guest just wants to flush the TLBs */
-            rc = CPUMSetGuestCR3(pVM, val); AssertRC(rc);
-            if (CPUMGetGuestCR0(pVM) & X86_CR0_PG)
-            {
-                /* flush */
-                rc = PGMFlushTLB(pVM, val, !(CPUMGetGuestCR4(pVM) & X86_CR4_PGE));
-                AssertRCReturn(rc, rc);
-            }
-            return VINF_SUCCESS;
-
-        case USE_REG_CR4:
-            oldval = CPUMGetGuestCR4(pVM);
-            rc = CPUMSetGuestCR4(pVM, val); AssertRC(rc);
-            val = CPUMGetGuestCR4(pVM);
-
-            msrEFER = CPUMGetGuestEFER(pVM);
-            /* Illegal to disable PAE when long mode is active. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */
-            if (    (msrEFER & MSR_K6_EFER_LMA)
-                &&  (oldval & X86_CR4_PAE)
-                &&  !(val & X86_CR4_PAE))
-            {
-                return VERR_EM_INTERPRETER; /* @todo generate #GP(0) */
-            }
-
-            if (    (oldval & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE))
-                !=  (val & (X86_CR4_PGE|X86_CR4_PAE|X86_CR4_PSE)))
-            {
-                /* global flush */
-                rc = PGMFlushTLB(pVM, CPUMGetGuestCR3(pVM), true /* global */);
-                AssertRCReturn(rc, rc);
-            }
-# ifdef IN_GC
-            /* Feeling extremely lazy. */
-            if (    (oldval & (X86_CR4_OSFSXR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME))
-                !=  (val & (X86_CR4_OSFSXR|X86_CR4_OSXMMEEXCPT|X86_CR4_PCE|X86_CR4_MCE|X86_CR4_PAE|X86_CR4_DE|X86_CR4_TSD|X86_CR4_PVI|X86_CR4_VME)))
-            {
-                Log(("emInterpretMovCRx: CR4: %#RX64->%#RX64 => R3\n", oldval, val));
-                VM_FF_SET(pVM, VM_FF_TO_R3);
-            }
-# endif
-            return PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR4(pVM), CPUMGetGuestEFER(pVM));
-
-        case USE_REG_CR8:
-            return PDMApicSetTPR(pVM, val);
-
-        default:
-            AssertFailed();
-        case USE_REG_CR1: /* illegal op */
-            break;
-        }
-    }
-    return VERR_EM_INTERPRETER;
-}
-
+    return EMInterpretLMSW(pVM, pRegFrame, val);
+}
 
 /**
```
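The CR0 arm of the new EMUpdateCRx carries the long-mode transition rules cited from the AMD Architecture Programmer's Manual (Vol. 2, Table 14-5): with EFER.LME set, enabling CR0.PG activates long mode (EFER.LMA := 1) and is legal only if CR4.PAE is set and no 64-bit code segment is active, while clearing PG deactivates it. A minimal standalone model of just that decision, with the architectural bit positions written out; the function name, parameters, and boolean convention here are illustrative, not VBox API (the real code returns VERR_EM_INTERPRETER and leaves raising #GP(0) as a todo):

```c
#include <stdbool.h>
#include <stdint.h>

#define CR0_PG   (1ull << 31)  /* paging enable */
#define CR4_PAE  (1ull << 5)   /* physical address extension */
#define EFER_LME (1ull << 8)   /* long mode enable */
#define EFER_LMA (1ull << 10)  /* long mode active */

/* Recompute EFER.LMA after a CR0 write; returns false where the
 * guest would take #GP(0) (AMD APM Vol. 2, Table 14-5). */
static bool updateLongModeActive(uint64_t oldCr0, uint64_t newCr0,
                                 uint64_t cr4, bool fCsIsLong, uint64_t *pEfer)
{
    if (!(*pEfer & EFER_LME))
        return true;                       /* long mode not enabled: nothing to do */

    if (!(oldCr0 & CR0_PG) && (newCr0 & CR0_PG))
    {
        /* Entering long mode: no 64-bit CS may be live, and PAE must be on. */
        if (fCsIsLong || !(cr4 & CR4_PAE))
            return false;
        *pEfer |= EFER_LMA;
    }
    else if ((oldCr0 & CR0_PG) && !(newCr0 & CR0_PG))
        *pEfer &= ~EFER_LMA;               /* leaving long mode */

    return true;
}
```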
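The LMSW change is the point of the refactor: rather than refusing any write that touches CR0.PE, EMInterpretLMSW now builds the merged CR0 value and hands it to EMUpdateCRx, so LMSW-initiated mode switches flow through the same PGMFlushTLB/PGMChangeMode path as MOV CR0. A sketch of that merge in plain C (the CR0_* constants and lmswMergeCr0 are illustrative names, not VBox definitions):

```c
#include <stdint.h>

/* The low four CR0 bits are the only ones LMSW may write. */
#define CR0_PE (1u << 0)   /* Protection Enable */
#define CR0_MP (1u << 1)   /* Monitor Coprocessor */
#define CR0_EM (1u << 2)   /* Emulation */
#define CR0_TS (1u << 3)   /* Task Switched */
#define CR0_LMSW_MASK (CR0_PE | CR0_MP | CR0_EM | CR0_TS)

/* Mirrors the merge in EMInterpretLMSW: keep every upper CR0 bit and
 * replace only PE/MP/EM/TS with the 16-bit LMSW source operand.
 * (Architecturally LMSW can set PE but never clear it; this raw merge
 * leaves enforcing that rule to the surrounding interpreter logic.) */
static uint64_t lmswMergeCr0(uint64_t oldCr0, uint16_t u16Data)
{
    return (oldCr0  & ~(uint64_t)CR0_LMSW_MASK)
         | (u16Data &            CR0_LMSW_MASK);
}
```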
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r13343 → r13351)

```diff
             Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
-            rc = EMInterpretLMSW(pVM, VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
+            rc = EMInterpretLMSW(pVM, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
             break;
```
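The VT-x caller only needs to pass the guest context core through. For reference, the 16-bit LMSW source operand is recovered from the VM-exit qualification: in the Intel SDM's layout for control-register-access exits, bits 3:0 hold the CR number, bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), and bits 31:16 the LMSW source data, which is evidently what VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA extracts here. A sketch of that decoding with plain masks (the struct and function names are illustrative, not the VBox macros):

```c
#include <stdint.h>

/* Decoded exit qualification for a control-register-access VM-exit
 * (Intel SDM, "Exit Qualification for Control-Register Accesses"). */
typedef struct CRXEXITQUAL
{
    unsigned uCrNum;      /* bits 3:0   - control register number */
    unsigned uAccessType; /* bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW */
    uint16_t u16LmswData; /* bits 31:16 - LMSW source operand (LMSW exits only) */
} CRXEXITQUAL;

static CRXEXITQUAL crxDecodeExitQual(uint64_t uQual)
{
    CRXEXITQUAL Qual;
    Qual.uCrNum      = (unsigned)( uQual        & 0xf);
    Qual.uAccessType = (unsigned)((uQual >> 4)  & 0x3);
    Qual.u16LmswData = (uint16_t)((uQual >> 16) & 0xffff);
    return Qual;
}
```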