Timestamp: Jun 16, 2010 6:31:42 PM
Location:  trunk/src
Files:     14 edited
trunk/src/VBox/VMM/CPUM.cpp
r30164 → r30263

  *******************************************************************************/
 /** The current saved state version. */
-#define CPUM_SAVED_STATE_VERSION                11
+#define CPUM_SAVED_STATE_VERSION                12
+/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
+ * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
+#define CPUM_SAVED_STATE_VERSION_VER3_2         11
 /** The saved state version of 3.0 and 3.1 trunk before the teleportation
  * changes. */
…
      */
     if (    uVersion != CPUM_SAVED_STATE_VERSION
+        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
         &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
         &&  uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
…
         }
     }
+
+    /* Older states does not set CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID for
+       raw-mode guest, so we have to do it ourselves. */
+    if (    uVersion <= CPUM_SAVED_STATE_VERSION_VER3_2
+        &&  !HWACCMIsEnabled(pVM))
+        for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+            pVM->aCpus[iCpu].cpum.s.fChanged |= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
 }
…
         return VERR_INTERNAL_ERROR_2;
     }
-
     return VINF_SUCCESS;
 }
…
         && pCtx->eflags.Bits.u1VM == 0)
     {
-        if (CPUMAreHiddenSelRegsValid(pVM))
+        if (CPUMAreHiddenSelRegsValid(pVCpu))
         {
             State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->csHid.Attr.n.u1Long;
…
     return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdDef);
 }
+
+
+/**
+ * Transforms the guest CPU state to raw-ring mode.
+ *
+ * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
+ *
+ * @returns VBox status. (recompiler failure)
+ * @param pVCpu The VMCPU handle.
+ * @param pCtxCore The context core (for trap usage).
+ * @see @ref pg_raw
+ */
+VMMR3DECL(int) CPUMR3RawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+
+    Assert(!pVCpu->cpum.s.fRawEntered);
+    Assert(!pVCpu->cpum.s.fRemEntered);
+    if (!pCtxCore)
+        pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
+
+    /*
+     * Are we in Ring-0?
+     */
+    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
+        &&  !pCtxCore->eflags.Bits.u1VM)
+    {
+        /*
+         * Enter execution mode.
+         */
+        PATMRawEnter(pVM, pCtxCore);
+
+        /*
+         * Set CPL to Ring-1.
+         */
+        pCtxCore->ss |= 1;
+        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
+            pCtxCore->cs |= 1;
+    }
+    else
+    {
+        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
+                  ("ring-1 code not supported\n"));
+        /*
+         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
+         */
+        PATMRawEnter(pVM, pCtxCore);
+    }
+
+    /*
+     * Invalidate the hidden registers.
+     */
+    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
+
+    /*
+     * Assert sanity.
+     */
+    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
+    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
+                     || pCtxCore->eflags.Bits.u1VM,
+                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
+    Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
+
+    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
+
+    pVCpu->cpum.s.fRawEntered = true;
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Transforms the guest CPU state from raw-ring mode to correct values.
+ *
+ * This function will change any selector registers with DPL=1 to DPL=0.
+ *
+ * @returns Adjusted rc.
+ * @param pVCpu The VMCPU handle.
+ * @param rc Raw mode return code
+ * @param pCtxCore The context core (for trap usage).
+ * @see @ref pg_raw
+ */
+VMMR3DECL(int) CPUMR3RawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+
+    /*
+     * Don't leave if we've already left (in GC).
+     */
+    Assert(pVCpu->cpum.s.fRawEntered);
+    Assert(!pVCpu->cpum.s.fRemEntered);
+    if (!pVCpu->cpum.s.fRawEntered)
+        return rc;
+    pVCpu->cpum.s.fRawEntered = false;
+
+    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+    if (!pCtxCore)
+        pCtxCore = CPUMCTX2CORE(pCtx);
+    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
+    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
+              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
+
+    /*
+     * Are we executing in raw ring-1?
+     */
+    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
+        &&  !pCtxCore->eflags.Bits.u1VM)
+    {
+        /*
+         * Leave execution mode.
+         */
+        PATMRawLeave(pVM, pCtxCore, rc);
+        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
+        /** @todo See what happens if we remove this. */
+        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
+            pCtxCore->ds &= ~X86_SEL_RPL;
+        if ((pCtxCore->es & X86_SEL_RPL) == 1)
+            pCtxCore->es &= ~X86_SEL_RPL;
+        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
+            pCtxCore->fs &= ~X86_SEL_RPL;
+        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
+            pCtxCore->gs &= ~X86_SEL_RPL;
+
+        /*
+         * Ring-1 selector => Ring-0.
+         */
+        pCtxCore->ss &= ~X86_SEL_RPL;
+        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
+            pCtxCore->cs &= ~X86_SEL_RPL;
+    }
+    else
+    {
+        /*
+         * PATM is taking care of the IOPL and IF flags for us.
+         */
+        PATMRawLeave(pVM, pCtxCore, rc);
+        if (!pCtxCore->eflags.Bits.u1VM)
+        {
+            /** @todo See what happens if we remove this. */
+            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
+                pCtxCore->ds &= ~X86_SEL_RPL;
+            if ((pCtxCore->es & X86_SEL_RPL) == 1)
+                pCtxCore->es &= ~X86_SEL_RPL;
+            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
+                pCtxCore->fs &= ~X86_SEL_RPL;
+            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
+                pCtxCore->gs &= ~X86_SEL_RPL;
+        }
+    }
+
+    return rc;
+}
+
+
+/**
+ * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
+ *
+ * Only REM should ever call this function!
+ *
+ * @returns The changed flags.
+ * @param pVCpu The VMCPU handle.
+ * @param puCpl Where to return the current privilege level (CPL).
+ */
+VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
+{
+    Assert(!pVCpu->cpum.s.fRawEntered);
+    Assert(!pVCpu->cpum.s.fRemEntered);
+
+    /*
+     * Get the CPL first.
+     */
+    *puCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.s.Guest));
+
+    /*
+     * Get and reset the flags, leaving CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID set.
+     */
+    uint32_t fFlags = pVCpu->cpum.s.fChanged;
+    pVCpu->cpum.s.fChanged &= CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID; /* leave it set */
+
+    /** @todo change the switcher to use the fChanged flags. */
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
+    {
+        fFlags |= CPUM_CHANGED_FPU_REM;
+        pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
+    }
+
+    pVCpu->cpum.s.fRemEntered = true;
+    return fFlags;
+}
+
+
+/**
+ * Leaves REM and works the CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID flag.
+ *
+ * @param pVCpu The virtual CPU handle.
+ * @param fNoOutOfSyncSels This is @c false if there are out of sync
+ *                         registers.
+ */
+VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
+{
+    Assert(!pVCpu->cpum.s.fRawEntered);
+    Assert(pVCpu->cpum.s.fRemEntered);
+
+    if (fNoOutOfSyncSels)
+        pVCpu->cpum.s.fChanged &= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
+    else
+        pVCpu->cpum.s.fChanged |= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;
+
+    pVCpu->cpum.s.fRemEntered = false;
+}
trunk/src/VBox/VMM/CPUMInternal.h
r30164 → r30263

     } CR4;
 
-    /** Have we entered rawmode? */
-    bool fRawEntered;
     /** Synthetic CPU type? */
     bool fSyntheticCpu;
…
      * This is used to verify load order dependencies (PGM). */
     bool fPendingRestore;
-#if HC_ARCH_BITS == 64
-    uint8_t abPadding[4];
-#endif
+    uint8_t abPadding[HC_ARCH_BITS == 64 ? 5 : 1];
 
     /** The standard set of CpuId leafs. */
…
     uint32_t u32RetCode;
 
+    /** Have we entered raw-mode? */
+    bool fRawEntered;
+    /** Have we entered the recompiler? */
+    bool fRemEntered;
+
     /** Align the structure on a 64-byte boundrary. */
-    uint8_t abPadding2[HC_ARCH_BITS == 32 ? 36 : 28];
+    uint8_t abPadding2[HC_ARCH_BITS == 32 ? 34 : 26];
 } CPUMCPU, *PCPUMCPU;
 /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
trunk/src/VBox/VMM/CPUMInternal.mac
r30184 → r30263

     .CR4.OrMask resd 1
     ; entered rawmode?
-    .fRawEntered resb 1
     .fSyntheticCpu resb 1
     .u8PortableCpuIdLevel resb 1
     .fPendingRestore resb 1
 %if RTHCPTR_CB == 8
-    .abPadding resb 4
+    .abPadding resb 5
 %else
-    ; .abPadding resb 0
+    .abPadding resb 1
 %endif
…
     .offCPUM resd 1
     .u32RetCode resd 1
+    .fRawEntered resb 1
+    .fRemEntered resb 1
 
 %if RTHCPTR_CB == 8
-    .abPadding2 resb 28
+    .abPadding2 resb 26
 %else
-    .abPadding2 resb 36
+    .abPadding2 resb 34
 %endif
trunk/src/VBox/VMM/DBGFDisas.cpp
r28800 → r30263

 
     if (    pHiddenSel
-        &&  CPUMAreHiddenSelRegsValid(pVM))
+        &&  CPUMAreHiddenSelRegsValid(pVCpu))
     {
         SelInfo.Sel = Sel;
…
     SelInfo.u.Raw.Gen.u4LimitHigh = 0xf;
 
-    if (CPUMAreHiddenSelRegsValid(pVM))
+    if (CPUMAreHiddenSelRegsValid(pVCpu))
     {   /* Assume the current CS defines the execution mode. */
         pCtxCore = CPUMGetGuestCtxCore(pVCpu);
trunk/src/VBox/VMM/EMRaw.cpp
r29329 → r30263

      * Resume execution.
      */
-    CPUMRawEnter(pVCpu, NULL);
+    CPUMR3RawEnter(pVCpu, NULL);
     CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
     rc = VMMR3ResumeHyper(pVM, pVCpu);
     Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
-    rc = CPUMRawLeave(pVCpu, NULL, rc);
+    rc = CPUMR3RawLeave(pVCpu, NULL, rc);
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
…
      * We do not start time or anything, if anything we should just do a few nanoseconds.
      */
-    CPUMRawEnter(pVCpu, NULL);
+    CPUMR3RawEnter(pVCpu, NULL);
     do
     {
…
     } while (   rc == VINF_SUCCESS
              || rc == VINF_EM_RAW_INTERRUPT);
-    rc = CPUMRawLeave(pVCpu, NULL, rc);
+    rc = CPUMR3RawLeave(pVCpu, NULL, rc);
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
…
      * and perhaps EIP) needs to be stored with PATM.
      */
-    rc = CPUMRawEnter(pVCpu, NULL);
+    rc = CPUMR3RawEnter(pVCpu, NULL);
     if (rc != VINF_SUCCESS)
     {
…
     if (rc != VINF_SUCCESS)
     {
-        rc = CPUMRawLeave(pVCpu, NULL, rc);
+        rc = CPUMR3RawLeave(pVCpu, NULL, rc);
         break;
     }
…
      * execution FFs before doing anything else.
      */
-    rc = CPUMRawLeave(pVCpu, NULL, rc);
+    rc = CPUMR3RawLeave(pVCpu, NULL, rc);
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
     if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r30164 → r30263

 
 
-
-#ifndef IN_RING0 /** @todo I don't think we need this in R0, so move it to CPUMAll.cpp? */
-
-/**
- * Transforms the guest CPU state to raw-ring mode.
- *
- * This function will change the any of the cs and ss register with DPL=0 to DPL=1.
- *
- * @returns VBox status. (recompiler failure)
- * @param pVCpu The VMCPU handle.
- * @param pCtxCore The context core (for trap usage).
- * @see @ref pg_raw
- */
-VMMDECL(int) CPUMRawEnter(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
-    Assert(!pVM->cpum.s.fRawEntered);
-    if (!pCtxCore)
-        pCtxCore = CPUMCTX2CORE(&pVCpu->cpum.s.Guest);
-
-    /*
-     * Are we in Ring-0?
-     */
-    if (    pCtxCore->ss && (pCtxCore->ss & X86_SEL_RPL) == 0
-        &&  !pCtxCore->eflags.Bits.u1VM)
-    {
-        /*
-         * Enter execution mode.
-         */
-        PATMRawEnter(pVM, pCtxCore);
-
-        /*
-         * Set CPL to Ring-1.
-         */
-        pCtxCore->ss |= 1;
-        if (pCtxCore->cs && (pCtxCore->cs & X86_SEL_RPL) == 0)
-            pCtxCore->cs |= 1;
-    }
-    else
-    {
-        AssertMsg((pCtxCore->ss & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
-                  ("ring-1 code not supported\n"));
-        /*
-         * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
-         */
-        PATMRawEnter(pVM, pCtxCore);
-    }
-
-    /*
-     * Assert sanity.
-     */
-    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
-    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL)
-                     || pCtxCore->eflags.Bits.u1VM,
-                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
-    Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
-    pCtxCore->eflags.u32 |= X86_EFL_IF; /* paranoia */
-
-    pVM->cpum.s.fRawEntered = true;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Transforms the guest CPU state from raw-ring mode to correct values.
- *
- * This function will change any selector registers with DPL=1 to DPL=0.
- *
- * @returns Adjusted rc.
- * @param pVCpu The VMCPU handle.
- * @param rc Raw mode return code
- * @param pCtxCore The context core (for trap usage).
- * @see @ref pg_raw
- */
-VMMDECL(int) CPUMRawLeave(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, int rc)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
-    /*
-     * Don't leave if we've already left (in GC).
-     */
-    Assert(pVM->cpum.s.fRawEntered);
-    if (!pVM->cpum.s.fRawEntered)
-        return rc;
-    pVM->cpum.s.fRawEntered = false;
-
-    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    if (!pCtxCore)
-        pCtxCore = CPUMCTX2CORE(pCtx);
-    Assert(pCtxCore->eflags.Bits.u1VM || (pCtxCore->ss & X86_SEL_RPL));
-    AssertMsg(pCtxCore->eflags.Bits.u1VM || pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss & X86_SEL_RPL),
-              ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss & X86_SEL_RPL));
-
-    /*
-     * Are we executing in raw ring-1?
-     */
-    if (    (pCtxCore->ss & X86_SEL_RPL) == 1
-        &&  !pCtxCore->eflags.Bits.u1VM)
-    {
-        /*
-         * Leave execution mode.
-         */
-        PATMRawLeave(pVM, pCtxCore, rc);
-        /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
-        /** @todo See what happens if we remove this. */
-        if ((pCtxCore->ds & X86_SEL_RPL) == 1)
-            pCtxCore->ds &= ~X86_SEL_RPL;
-        if ((pCtxCore->es & X86_SEL_RPL) == 1)
-            pCtxCore->es &= ~X86_SEL_RPL;
-        if ((pCtxCore->fs & X86_SEL_RPL) == 1)
-            pCtxCore->fs &= ~X86_SEL_RPL;
-        if ((pCtxCore->gs & X86_SEL_RPL) == 1)
-            pCtxCore->gs &= ~X86_SEL_RPL;
-
-        /*
-         * Ring-1 selector => Ring-0.
-         */
-        pCtxCore->ss &= ~X86_SEL_RPL;
-        if ((pCtxCore->cs & X86_SEL_RPL) == 1)
-            pCtxCore->cs &= ~X86_SEL_RPL;
-    }
-    else
-    {
-        /*
-         * PATM is taking care of the IOPL and IF flags for us.
-         */
-        PATMRawLeave(pVM, pCtxCore, rc);
-        if (!pCtxCore->eflags.Bits.u1VM)
-        {
-            /** @todo See what happens if we remove this. */
-            if ((pCtxCore->ds & X86_SEL_RPL) == 1)
-                pCtxCore->ds &= ~X86_SEL_RPL;
-            if ((pCtxCore->es & X86_SEL_RPL) == 1)
-                pCtxCore->es &= ~X86_SEL_RPL;
-            if ((pCtxCore->fs & X86_SEL_RPL) == 1)
-                pCtxCore->fs &= ~X86_SEL_RPL;
-            if ((pCtxCore->gs & X86_SEL_RPL) == 1)
-                pCtxCore->gs &= ~X86_SEL_RPL;
-        }
-    }
-
-    return rc;
-}
-
+#ifndef IN_RING0
 /**
  * Updates the EFLAGS while we're in raw-mode.
…
     PVM pVM = pVCpu->CTX_SUFF(pVM);
 
-    if (!pVM->cpum.s.fRawEntered)
+    if (!pVCpu->cpum.s.fRawEntered)
     {
         pCtxCore->eflags.u32 = eflags;
…
     PATMRawSetEFlags(pVM, pCtxCore, eflags);
 }
-
 #endif /* !IN_RING0 */
 
+
 /**
…
     PVM pVM = pVCpu->CTX_SUFF(pVM);
 
-    if (!pVM->cpum.s.fRawEntered)
+    if (!pVCpu->cpum.s.fRawEntered)
         return pCtxCore->eflags.u32;
     return PATMRawGetEFlags(pVM, pCtxCore);
 #endif
-}
-
-
-/**
- * Gets and resets the changed flags (CPUM_CHANGED_*).
- * Only REM should call this function.
- *
- * @returns The changed flags.
- * @param pVCpu The VMCPU handle.
- */
-VMMDECL(unsigned) CPUMGetAndClearChangedFlagsREM(PVMCPU pVCpu)
-{
-    unsigned fFlags = pVCpu->cpum.s.fChanged;
-    pVCpu->cpum.s.fChanged = 0;
-    /** @todo change the switcher to use the fChanged flags. */
-    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
-    {
-        fFlags |= CPUM_CHANGED_FPU_REM;
-        pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
-    }
-    return fFlags;
 }
…
 
 /**
- * Mark the guest's debug state as inactive
+ * Mark the guest's debug state as inactive.
  *
  * @returns boolean
…
 
 /**
- * Mark the hypervisor's debug state as inactive
+ * Mark the hypervisor's debug state as inactive.
  *
  * @returns boolean
…
 
 /**
- * Checks if the hidden selector registers are valid
+ * Checks if the hidden selector registers are valid for the specified CPU.
+ *
  * @returns true if they are.
  * @returns false if not.
- * @param pVM The VM handle.
- */
-VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVM pVM)
-{
-    return HWACCMIsEnabled(pVM);
+ * @param pVCpu The VM handle.
+ */
+VMMDECL(bool) CPUMAreHiddenSelRegsValid(PVMCPU pVCpu)
+{
+    bool const fRc = !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
+    Assert(fRc || !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
+    Assert(!pVCpu->cpum.s.fRemEntered);
+    return fRc;
 }
…
     uint32_t cpl;
 
-    if (CPUMAreHiddenSelRegsValid(pVCpu->CTX_SUFF(pVM)))
+    if (CPUMAreHiddenSelRegsValid(pVCpu))
     {
         /*
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r29676 → r30263

              */
             RTGCPTR PC;
-            rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
+            rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs,
+                                              &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
             if (rc == VINF_SUCCESS)
             {
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r28800 → r30263

 {
     RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
-    if (CPUMAreHiddenSelRegsValid(pVM))
+    if (CPUMAreHiddenSelRegsValid(pVCpu))
         uFlat += pHiddenSel->u64Base;
     else
…
 
 #ifdef IN_RING0
-    Assert(CPUMAreHiddenSelRegsValid(pVM));
+    Assert(CPUMAreHiddenSelRegsValid(pVCpu));
 #else
     /** @todo when we're in 16 bits mode, we should cut off the address as well.. */
-    if (!CPUMAreHiddenSelRegsValid(pVM))
+    if (!CPUMAreHiddenSelRegsValid(pVCpu))
         return SELMToFlatBySel(pVM, Sel, Addr);
 #endif
…
     {
         if (    pHiddenSel
-            &&  CPUMAreHiddenSelRegsValid(pVM))
+            &&  CPUMAreHiddenSelRegsValid(pVCpu))
             *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
         else
…
 #ifndef IN_RC
     if (    pHiddenSel
-        &&  CPUMAreHiddenSelRegsValid(pVM))
+        &&  CPUMAreHiddenSelRegsValid(pVCpu))
     {
         bool fCheckLimit = true;
…
  * @remarks Don't use when in long mode.
  */
-VMMDECL(int) SELMToFlatBySelEx(PVM pVM, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, CPUMSELREGHID *pHiddenSel, unsigned fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
+VMMDECL(int) SELMToFlatBySelEx(PVM pVM, X86EFLAGS eflags, RTSEL Sel, RTGCPTR Addr, PCCPUMSELREGHID pHiddenSel, unsigned fFlags, PRTGCPTR ppvGC, uint32_t *pcb)
 {
     PVMCPU pVCpu = VMMGetCpu(pVM);
…
     {
         if (    pHiddenSel
-            &&  CPUMAreHiddenSelRegsValid(pVM))
+            &&  CPUMAreHiddenSelRegsValid(pVCpu))
             *ppvGC = (RTGCPTR)(pHiddenSel->u64Base + uFlat);
         else
…
     /** @todo when we're in 16 bits mode, we should cut off the address as well.. */
     if (    pHiddenSel
-        &&  CPUMAreHiddenSelRegsValid(pVM))
+        &&  CPUMAreHiddenSelRegsValid(pVCpu))
     {
         u1Present = pHiddenSel->Attr.n.u1Present;
…
  *
  * @returns VINF_SUCCESS.
- * @param pVM VM Handle.
+ * @param pVCpu The Virtual CPU handle.
  * @param SelCS Selector part.
  * @param pHidCS The hidden CS register part. Optional.
…
  * @param ppvFlat Where to store the flat address.
  */
-DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVM pVM, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
+DECLINLINE(int) selmValidateAndConvertCSAddrRealMode(PVMCPU pVCpu, RTSEL SelCS, PCCPUMSELREGHID pHidCS, RTGCPTR Addr,
+                                                     PRTGCPTR ppvFlat)
 {
     RTGCUINTPTR uFlat = (RTGCUINTPTR)Addr & 0xffff;
-    if (!pHidCS || !CPUMAreHiddenSelRegsValid(pVM))
+    if (!pHidCS || !CPUMAreHiddenSelRegsValid(pVCpu))
         uFlat += ((RTGCUINTPTR)SelCS << 4);
     else
…
  * @returns VBox status code.
  * @param pVM VM Handle.
+ * @param pVCpu The virtual CPU handle.
  * @param SelCPL Current privilege level. Get this from SS - CS might be conforming!
  *               A full selector can be passed, we'll only use the RPL part.
…
  * @param pcBits Where to store the segment bitness (16/32/64). Optional.
  */
-DECLINLINE(int) selmValidateAndConvertCSAddrStd(PVM pVM, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr, PRTGCPTR ppvFlat, uint32_t *pcBits)
-{
-    Assert(!CPUMAreHiddenSelRegsValid(pVM));
-
+DECLINLINE(int) selmValidateAndConvertCSAddrStd(PVM pVM, PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, RTGCPTR Addr,
+                                                PRTGCPTR ppvFlat, uint32_t *pcBits)
+{
     /** @todo validate limit! */
     X86DESC Desc;
…
  * @param ppvFlat Where to store the flat address.
  */
-DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCPUMSELREGHID pHidCS, RTGCPTR Addr, PRTGCPTR ppvFlat)
+DECLINLINE(int) selmValidateAndConvertCSAddrHidden(PVMCPU pVCpu, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pHidCS,
+                                                   RTGCPTR Addr, PRTGCPTR ppvFlat)
 {
     /*
…
     {
         *pcBits = 16;
-        return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, NULL, Addr, ppvFlat);
-    }
-    return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, pcBits);
+        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, NULL, Addr, ppvFlat);
+    }
+    Assert(!CPUMAreHiddenSelRegsValid(pVCpu));
+    return selmValidateAndConvertCSAddrStd(pVM, pVCpu, SelCPL, SelCS, Addr, ppvFlat, pcBits);
 }
 #endif /* IN_RC */
…
  * @param ppvFlat Where to store the flat address.
  */
-VMMDECL(int) SELMValidateAndConvertCSAddr(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, CPUMSELREGHID *pHiddenCSSel, RTGCPTR Addr, PRTGCPTR ppvFlat)
+VMMDECL(int) SELMValidateAndConvertCSAddr(PVM pVM, X86EFLAGS eflags, RTSEL SelCPL, RTSEL SelCS, PCCPUMSELREGHID pHiddenCSSel,
+                                          RTGCPTR Addr, PRTGCPTR ppvFlat)
 {
     PVMCPU pVCpu = VMMGetCpu(pVM);
…
     if (    eflags.Bits.u1VM
         ||  CPUMIsGuestInRealMode(pVCpu))
-        return selmValidateAndConvertCSAddrRealMode(pVM, SelCS, pHiddenCSSel, Addr, ppvFlat);
+        return selmValidateAndConvertCSAddrRealMode(pVCpu, SelCS, pHiddenCSSel, Addr, ppvFlat);
 
 #ifdef IN_RING0
-    Assert(CPUMAreHiddenSelRegsValid(pVM));
+    Assert(CPUMAreHiddenSelRegsValid(pVCpu));
 #else
     /** @todo when we're in 16 bits mode, we should cut off the address as well? (like in selmValidateAndConvertCSAddrRealMode) */
-    if (!CPUMAreHiddenSelRegsValid(pVM))
-        return selmValidateAndConvertCSAddrStd(pVM, SelCPL, SelCS, Addr, ppvFlat, NULL);
+    if (!CPUMAreHiddenSelRegsValid(pVCpu) || !pHiddenCSSel)
+        return selmValidateAndConvertCSAddrStd(pVM, pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);
 #endif
     return selmValidateAndConvertCSAddrHidden(pVCpu, SelCPL, SelCS, pHiddenCSSel, Addr, ppvFlat);
…
  * @returns DISCPUMODE according to the selector type (16, 32 or 64 bits)
  * @param pVM VM Handle.
+ * @param pVCpu The virtual CPU handle.
  * @param Sel The selector.
  */
-static DISCPUMODE selmGetCpuModeFromSelector(PVM pVM, RTSEL Sel)
-{
-    Assert(!CPUMAreHiddenSelRegsValid(pVM));
+static DISCPUMODE selmGetCpuModeFromSelector(PVM pVM, PVMCPU pVCpu, RTSEL Sel)
+{
+    Assert(!CPUMAreHiddenSelRegsValid(pVCpu));
 
     /** @todo validate limit! */
…
  * @param pHiddenSel The hidden selector register.
  */
-VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVM pVM, X86EFLAGS eflags, RTSEL Sel, CPUMSELREGHID *pHiddenSel)
+VMMDECL(DISCPUMODE) SELMGetCpuModeFromSelector(PVM pVM, X86EFLAGS eflags, RTSEL Sel, PCCPUMSELREGHID pHiddenSel)
 {
     PVMCPU pVCpu = VMMGetCpu(pVM);
 #ifdef IN_RING0
-    Assert(CPUMAreHiddenSelRegsValid(pVM));
+    Assert(CPUMAreHiddenSelRegsValid(pVCpu));
 #else /* !IN_RING0 */
-    if (!CPUMAreHiddenSelRegsValid(pVM))
+    if (!CPUMAreHiddenSelRegsValid(pVCpu))
     {
         /*
…
             return CPUMODE_16BIT;
 
-        return selmGetCpuModeFromSelector(pVM, Sel);
+        return selmGetCpuModeFromSelector(pVM, pVCpu, Sel);
     }
 #endif /* !IN_RING0 */
…
 
     /* Else compatibility or 32 bits mode. */
-    return (pHiddenSel->Attr.n.u1DefBig)? CPUMODE_32BIT : CPUMODE_16BIT;
+    return pHiddenSel->Attr.n.u1DefBig ? CPUMODE_32BIT : CPUMODE_16BIT;
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
r29250 → r30263

  * @internal
  */
-VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGate, uint32_t opsize, TRPMERRORCODE enmError, TRPMEVENT enmType, int32_t iOrgTrap)
+VMMDECL(int) TRPMForwardTrap(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t iGate, uint32_t opsize,
+                             TRPMERRORCODE enmError, TRPMEVENT enmType, int32_t iOrgTrap)
 {
 #ifdef TRPM_FORWARD_TRAPS_IN_GC
trunk/src/VBox/VMM/VMMGC/TRPMGC.cpp
r28800 → r30263

      */
     RTGCPTR PC;
-    int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
+    int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid,
+                                          (RTGCPTR)pRegFrame->eip, &PC);
     if (rc == VINF_SUCCESS)
     {
trunk/src/VBox/VMM/VMMGC/TRPMGCHandlers.cpp
r30160 → r30263

      */
     RTGCPTR PC;
-    rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &PC);
+    rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid,
+                                      (RTGCPTR)pRegFrame->eip, &PC);
     if (RT_FAILURE(rc))
     {
…
      */
     RTGCPTR GCPtr;
-    if (SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &GCPtr) == VINF_SUCCESS)
+    if (   SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid,
+                                        (RTGCPTR)pRegFrame->eip, &GCPtr)
+        == VINF_SUCCESS)
     {
         uint8_t *pu8Code = (uint8_t *)(uintptr_t)GCPtr;
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r30181 → r30263

     {
         RTGCPTR pbCode;
-        int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->rip, &pbCode);
+        int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs,
+                                              &pRegFrame->csHid, (RTGCPTR)pRegFrame->rip, &pbCode);
         if (RT_SUCCESS(rc))
         {
trunk/src/recompiler/VBoxREMWrapper.cpp
r29333 → r30263

 };
 
+static const REMPARMDESC g_aArgsCPUMR3RemEnter[] =
+{
+    { REMPARMDESC_FLAGS_INT, sizeof(PVMCPU), NULL },
+    { REMPARMDESC_FLAGS_INT, sizeof(uint32_t *), NULL }
+};
+
+static const REMPARMDESC g_aArgsCPUMR3RemLeave[] =
+{
+    { REMPARMDESC_FLAGS_INT, sizeof(PVMCPU), NULL },
+    { REMPARMDESC_FLAGS_INT, sizeof(bool), NULL }
+};
+
 static const REMPARMDESC g_aArgsCPUMSetChangedFlags[] =
 {
…
 static REMFNDESC g_aVMMImports[] =
 {
-    { "CPUMAreHiddenSelRegsValid", VMM_FN(CPUMAreHiddenSelRegsValid), &g_aArgsVM[0], RT_ELEMENTS(g_aArgsVM), REMFNDESC_FLAGS_RET_INT, sizeof(bool), NULL },
-    { "CPUMGetAndClearChangedFlagsREM", VMM_FN(CPUMGetAndClearChangedFlagsREM), &g_aArgsVM[0], RT_ELEMENTS(g_aArgsVM), REMFNDESC_FLAGS_RET_INT, sizeof(unsigned), NULL },
+    { "CPUMAreHiddenSelRegsValid", VMM_FN(CPUMAreHiddenSelRegsValid), &g_aArgsVMCPU[0], RT_ELEMENTS(g_aArgsVMCPU), REMFNDESC_FLAGS_RET_INT, sizeof(bool), NULL },
+    { "CPUMR3RemEnter", VMM_FN(CPUMR3RemEnter), &g_aArgsCPUMR3RemEnter[0], RT_ELEMENTS(g_aArgsCPUMR3RemEnter), REMFNDESC_FLAGS_RET_INT, sizeof(uint32_t), NULL },
+    { "CPUMR3RemLeave", VMM_FN(CPUMR3RemLeave), &g_aArgsCPUMR3RemLeave[0], RT_ELEMENTS(g_aArgsCPUMR3RemLeave), REMFNDESC_FLAGS_RET_VOID, 0, NULL },
     { "CPUMSetChangedFlags", VMM_FN(CPUMSetChangedFlags), &g_aArgsCPUMSetChangedFlags[0], RT_ELEMENTS(g_aArgsCPUMSetChangedFlags), REMFNDESC_FLAGS_RET_VOID, 0, NULL },
     { "CPUMGetGuestCPL", VMM_FN(CPUMGetGuestCPL), &g_aArgsCPUMGetGuestCpl[0], RT_ELEMENTS(g_aArgsCPUMGetGuestCpl), REMFNDESC_FLAGS_RET_INT, sizeof(unsigned), NULL },
trunk/src/recompiler/VBoxRecompiler.c
r29333 → r30263

     TRPMEVENT enmType;
     uint8_t u8TrapNo;
+    uint32_t uCpl;
     int rc;
 
…
     pVM->rem.s.Env.pVCpu = pVCpu;
     pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
+    fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
 
     Assert(!pVM->rem.s.fInREM);
…
      * Registers which are rarely changed and require special handling / order when changed.
      */
-    fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
-    LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
+    fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
+    LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
     if (fFlags & (  CPUM_CHANGED_CR4  | CPUM_CHANGED_CR3  | CPUM_CHANGED_CR0
                   | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
…
 
     /* Set current CPL */
-    cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
+    cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
 
     cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
…
         Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
 
-        cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
+        cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
         sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
 #ifdef VBOX_WITH_STATISTICS
…
      * We're not longer in REM mode.
      */
+    CPUMR3RemLeave(pVCpu,
+                      HWACCMIsEnabled(pVM)
+                   || (  pVM->rem.s.Env.segs[R_SS].newselector
+                       | pVM->rem.s.Env.segs[R_GS].newselector
+                       | pVM->rem.s.Env.segs[R_FS].newselector
+                       | pVM->rem.s.Env.segs[R_ES].newselector
+                       | pVM->rem.s.Env.segs[R_DS].newselector
+                       | pVM->rem.s.Env.segs[R_CS].newselector) == 0
+                   );
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
     pVM->rem.s.fInREM = false;