Changeset 46925 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jul 3, 2013 11:16:39 AM (12 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r46905 → r46925:

 .test_es:
     test    edi, VMX_RESTORE_HOST_SEL_ES
-    jz      near .test_fs
+    jz      near .test_ldtr
     mov     ax, word [rsi + VMXRESTOREHOST.uHostSelES]
     mov     es, ax
+
+.test_ldtr:
+    test    edi, VMX_RESTORE_HOST_SEL_LDTR
+    jz      near .test_tr
+    mov     ax, word [rsi + VMXRESTOREHOST.uHostSelLDTR]
+    lldt    ax
+
+.test_tr:
+    test    edi, VMX_RESTORE_HOST_SEL_TR
+    jz      near .test_fs
+    mov     dx, word [rsi + VMXRESTOREHOST.uHostSelTR]
+    xor     xAX, xAX
+    mov     ax, dx
+    and     al, X86_SEL_MASK                                ; Mask away TI and RPL bits leaving only the descriptor offset.
+    add     xAX, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
+    and     dword [ss:xAX + 4], ~RT_BIT(9)                  ; Clear the busy flag in TSS (bits 0-7=base, bit 9=busy bit).
+    ltr     dx

 .test_fs:
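A note on the new .test_tr block, since the reason for touching the GDT is easy to miss: ltr refuses to load a TSS descriptor whose busy bit is already set (it raises #GP), so the busy flag has to be cleared in the GDT entry before TR is reloaded. The following is a minimal C sketch of those same steps, assuming a flat, writable mapping of the host GDT and GCC/Clang inline assembly; the function and parameter names are illustrative only and not VirtualBox code.

    #include <stdint.h>

    /* Sketch of the .test_tr sequence: clear the busy flag (bit 9 of the
       descriptor's second dword, i.e. type bit 1) in the GDT entry, then
       reload TR.  Privileged; ring-0 only. */
    static void ReloadHostTr(uint16_t uSelTR, uint8_t *pbGdt /* GDTR.pGdt, flat mapping assumed */)
    {
        uint32_t  offDesc      = uSelTR & ~(uint32_t)0x7;           /* mask away TI and RPL, keeping the descriptor offset */
        uint32_t *pu32DescHigh = (uint32_t *)(pbGdt + offDesc + 4); /* second dword of the descriptor */

        *pu32DescHigh &= ~(uint32_t)(1u << 9);                      /* busy TSS -> available TSS */
        __asm__ __volatile__("ltr %0" : : "r" (uSelTR) : "memory"); /* reload the host task register */
    }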
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
r46914 → r46925:

 %ifdef RT_ARCH_AMD64
  %define VMX_SKIP_GDTR_IDTR
+ %define VMX_SKIP_LDTR_TR
 %endif
 %endif
…
     push    xSI

+%ifndef VMX_SKIP_LDTR_TR
     ; Save LDTR.
     xor     eax, eax
…
     str     eax
     push    xAX
+%endif

+%ifndef VMX_SKIP_GDTR_IDTR
     ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
-%ifndef VMX_SKIP_GDTR_IDTR
     sub     xSP, xCB * 2
     sgdt    [xSP]
…

     push    xDI
-    mov     xDI, [xSP + xCB * 3]        ; pCtx (*3 to skip the saved LDTR + TR).
+%ifndef VMX_SKIP_LDTR_TR
+    mov     xDI, [xSP + xCB * 3]        ; pCtx (*3 to skip the saved xDI, LDTR + TR).
+%else
+    mov     xDI, [xSP + xCB]            ; pCtx
+%endif

     mov     [ss:xDI + CPUMCTX.eax], eax
…
 %endif

+%ifndef VMX_SKIP_LDTR_TR
     ; Restore TSS selector; must mark it as not busy before using ltr (!)
     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
…
     pop     xAX                         ; Saved LDTR
     lldt    ax
+%endif

     add     xSP, xCB                    ; pCtx
…
 %endif

+%ifndef VMX_SKIP_LDTR_TR
     ; Restore TSS selector; must mark it as not busy before using ltr (!)
     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
…
     pop     xAX                         ; Saved LDTR
     lldt    ax
+%endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
…
 %endif

+%ifndef VMX_SKIP_LDTR_TR
     ; Restore TSS selector; must mark it as not busy before using ltr (!)
     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
…
     pop     xAX                         ; Saved LDTR
     lldt    ax
+%endif

 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
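One subtle consequence of the new %ifndef VMX_SKIP_LDTR_TR blocks is visible in the pCtx fetch above: dropping the two pushes removes two slots from the stack, so pCtx moves from [xSP + xCB * 3] to [xSP + xCB] relative to the post-exit push xDI. Below is a small C sketch of the two layouts, assuming pointer-sized (xCB) slots; the struct and field names are made up purely for illustration and are not part of the changeset.

    #include <stddef.h>
    #include <stdint.h>

    /* Stack as seen right after the post-VM-exit 'push xDI', lowest address first. */
    typedef struct FRAME_WITH_LDTR_TR
    {
        uintptr_t savedRDI;   /* pushed last, sits at xSP         */
        uintptr_t savedTR;    /* from 'str eax' / 'push xAX'      */
        uintptr_t savedLDTR;  /* from 'sldt ax' / 'push xAX'      */
        uintptr_t pCtx;       /* hence 'mov xDI, [xSP + xCB * 3]' */
    } FRAME_WITH_LDTR_TR;

    typedef struct FRAME_SKIP_LDTR_TR
    {
        uintptr_t savedRDI;   /* pushed last, sits at xSP         */
        uintptr_t pCtx;       /* hence 'mov xDI, [xSP + xCB]'     */
    } FRAME_SKIP_LDTR_TR;

    _Static_assert(offsetof(FRAME_WITH_LDTR_TR, pCtx) == 3 * sizeof(uintptr_t), "pCtx at xSP + xCB * 3");
    _Static_assert(offsetof(FRAME_SKIP_LDTR_TR, pCtx) == 1 * sizeof(uintptr_t), "pCtx at xSP + xCB");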
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46900 → r46925:

 {
     int rc = VERR_INTERNAL_ERROR_5;
-    RTSEL uSelDS = 0;
-    RTSEL uSelES = 0;
-    RTSEL uSelFS = 0;
-    RTSEL uSelGS = 0;
-    RTSEL uSelTR = 0;
+    RTSEL uSelDS   = 0;
+    RTSEL uSelES   = 0;
+    RTSEL uSelFS   = 0;
+    RTSEL uSelGS   = 0;
+    RTSEL uSelTR   = 0;
+    RTSEL uSelLDTR = 0;

     /*
…
 #if HC_ARCH_BITS == 64
     pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
-    uSelDS = ASMGetDS();
-    uSelES = ASMGetES();
-    uSelFS = ASMGetFS();
-    uSelGS = ASMGetGS();
+    uSelDS   = ASMGetDS();
+    uSelES   = ASMGetES();
+    uSelFS   = ASMGetFS();
+    uSelGS   = ASMGetGS();
+    uSelLDTR = ASMGetLDTR();
 #endif

…
         pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
         uSelGS = 0;
+    }
+
+    /*
+     * VT-x unconditionally writes LDTR to 0 on all VM-exits. If the host has something different, we shall restore it.
+     * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
+     */
+    if (uSelLDTR)
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_LDTR;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelLDTR = uSelLDTR;
     }
 #endif
…
 #if HC_ARCH_BITS == 64
     uTRBase = X86DESC64_BASE(pDesc);
+
+    /*
+     * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
+     * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
+     * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
+     * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
+     *
+     * [1] See Intel spec. 3.5 "System Descriptor Types".
+     * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
+     */
+    Assert(pDesc->System.u4Type == 11);
+    if (   pDesc->System.u16LimitLow != 0x67
+        || pDesc->System.u4LimitHigh)
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
+
+        /* Store the GDTR here as we need it while restoring TR. */
+        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
+    }
 #else
     uTRBase = X86DESC_BASE(pDesc);
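For context on the 0x67 in the new TR check: a bare 64-bit TSS is 0x68 (104) bytes, so its minimal limit is 0x68 - 1 = 0x67, which is exactly what VT-x restores on every VM-exit; a larger limit usually means the host OS appended an I/O permission bitmap, and that is the case the lazy restore covers. The helper below, with made-up names and not part of the changeset, shows how the two descriptor fields read by the check combine into the 20-bit limit and why comparing against 0x67 is equivalent to the diff's two-part condition.

    #include <stdint.h>

    /* Combine the split limit fields of a TSS descriptor: limit[19:16] : limit[15:0]. */
    static inline uint32_t SketchTssLimit(uint16_t u16LimitLow, uint8_t u4LimitHigh)
    {
        return ((uint32_t)(u4LimitHigh & 0xf) << 16) | u16LimitLow;
    }

    /* True when the host TR limit differs from the 0x67 that VT-x restores,
       i.e. the same predicate as 'u16LimitLow != 0x67 || u4LimitHigh'. */
    static inline int SketchNeedsTrLimitRestore(uint16_t u16LimitLow, uint8_t u4LimitHigh)
    {
        return SketchTssLimit(u16LimitLow, u4LimitHigh) != 0x67;
    }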