Timestamp: Jul 3, 2013 11:16:39 AM
svn:sync-xref-src-repo-rev: 86928
Location: trunk
Files: 6 edited
Legend: unchanged context lines are shown plain; removed lines are prefixed with '-', added lines with '+'.
trunk/include/VBox/vmm/hm_vmx.h
r46379 → r46925:

      #define VMX_RESTORE_HOST_SEL_FS        RT_BIT(2)
      #define VMX_RESTORE_HOST_SEL_GS        RT_BIT(3)
  -   #define VMX_RESTORE_HOST_GDTR          RT_BIT(4)
  -   #define VMX_RESTORE_HOST_IDTR          RT_BIT(5)
  -   #define VMX_RESTORE_HOST_LDTR          RT_BIT(6)
  +   #define VMX_RESTORE_HOST_SEL_LDTR      RT_BIT(4)
  +   #define VMX_RESTORE_HOST_SEL_TR        RT_BIT(5)
  +   #define VMX_RESTORE_HOST_GDTR          RT_BIT(6)
  +   #define VMX_RESTORE_HOST_IDTR          RT_BIT(7)
      /** @} */
  …
      RTSEL       uHostSelFS;     /* 0x04 */
      RTSEL       uHostSelGS;     /* 0x06 */
  -   uint64_t    uHostFSBase;    /* 0x08 */
  -   uint64_t    uHostGSBase;    /* 0x10 */
  -   X86XDTR64   HostGdtr;       /* 0x18 */
  -   X86XDTR64   HostIdtr;       /* 0x22 */
  +   RTSEL       uHostSelLDTR;   /* 0x08 */
  +   RTSEL       uHostSelTR;     /* 0x0a */
  +   uint32_t    u32Padding;     /* 0x0c */
  +   uint64_t    uHostFSBase;    /* 0x10 */
  +   uint64_t    uHostGSBase;    /* 0x18 */
  +   X86XDTR64   HostGdtr;       /* 0x20 */
  +   X86XDTR64   HostIdtr;       /* 0x2a */
      } VMXRESTOREHOST;
      #pragma pack()
  …
      typedef VMXRESTOREHOST *PVMXRESTOREHOST;
      AssertCompileSize(X86XDTR64, 10);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelES, 2);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelFS, 4);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelGS, 6);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 8);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, uHostGSBase, 16);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr, 24);
  -   AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr, 34);
  -   AssertCompileSize(VMXRESTOREHOST, 44);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelDS, 0);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelES, 2);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelFS, 4);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelGS, 6);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelLDTR, 8);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelTR, 10);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 16);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, uHostGSBase, 24);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr, 32);
  +   AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr, 42);
  +   AssertCompileSize(VMXRESTOREHOST, 52);

      /** @name VMX VMCS-Read cache indices.
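The new assertions pin VMXRESTOREHOST to an exact byte layout because the assembly in VMMR0/HMR0A.asm addresses its fields by fixed offsets. The following standalone sketch (not the VirtualBox header; the mirror type names are made up, and it only assumes that RTSEL is a 2-byte selector and X86XDTR64 a packed 10-byte limit/base pair) reproduces the same offsets and the 52-byte total, which also shows why the 4-byte u32Padding member was introduced:

    /* Illustrative mirror of the layout asserted above; compiles as C11. */
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    #pragma pack(1)
    typedef struct { uint16_t cb; uint64_t uAddr; } MYXDTR64;   /* 10 bytes, like X86XDTR64 */
    typedef struct
    {
        uint16_t  uHostSelDS;     /* 0x00 */
        uint16_t  uHostSelES;     /* 0x02 */
        uint16_t  uHostSelFS;     /* 0x04 */
        uint16_t  uHostSelGS;     /* 0x06 */
        uint16_t  uHostSelLDTR;   /* 0x08 */
        uint16_t  uHostSelTR;     /* 0x0a */
        uint32_t  u32Padding;     /* 0x0c - pads the six selectors out to 16 bytes */
        uint64_t  uHostFSBase;    /* 0x10 */
        uint64_t  uHostGSBase;    /* 0x18 */
        MYXDTR64  HostGdtr;       /* 0x20 */
        MYXDTR64  HostIdtr;       /* 0x2a */
    } MYRESTOREHOST;
    #pragma pack()

    static_assert(offsetof(MYRESTOREHOST, uHostSelLDTR) ==  8, "layout");
    static_assert(offsetof(MYRESTOREHOST, uHostSelTR)   == 10, "layout");
    static_assert(offsetof(MYRESTOREHOST, uHostFSBase)  == 16, "layout");
    static_assert(offsetof(MYRESTOREHOST, HostGdtr)     == 32, "layout");
    static_assert(offsetof(MYRESTOREHOST, HostIdtr)     == 42, "layout");
    static_assert(sizeof(MYRESTOREHOST)                 == 52, "layout");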
trunk/include/VBox/vmm/hm_vmx.mac
r46312 → r46925:

      %define VMX_VMCS_GUEST_DEBUGCTL_FULL                  02802h
      %define VMX_VMCS_GUEST_DEBUGCTL_HIGH                  02803h
  -   %define VMX_VMCS_CTRL_PIN_EXEC
  -   %define VMX_VMCS_CTRL_PROC_EXEC
  +   %define VMX_VMCS_CTRL_PIN_EXEC                        04000h
  +   %define VMX_VMCS_CTRL_PROC_EXEC                       04002h
      %define VMX_VMCS_CTRL_EXCEPTION_BITMAP                04004h
      %define VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK            04006h
      %define VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH           04008h
      %define VMX_VMCS_CTRL_CR3_TARGET_COUNT                0400Ah
  -   %define VMX_VMCS_CTRL_EXIT
  +   %define VMX_VMCS_CTRL_EXIT                            0400Ch
      %define VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT            0400Eh
      %define VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT             04010h
  -   %define VMX_VMCS_CTRL_ENTRY
  +   %define VMX_VMCS_CTRL_ENTRY                           04012h
      %define VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT            04014h
      %define VMX_VMCS_CTRL_ENTRY_IRQ_INFO                  04016h
  …
      %define VMX_VMCS_HOST_RIP                             06C16h

  -   %define VMX_RESTORE_HOST_SEL_DS        1h  ;RT_BIT(0)
  -   %define VMX_RESTORE_HOST_SEL_ES        2h  ;RT_BIT(1)
  -   %define VMX_RESTORE_HOST_SEL_FS        4h  ;RT_BIT(2)
  -   %define VMX_RESTORE_HOST_SEL_GS        8h  ;RT_BIT(3)
  -   %define VMX_RESTORE_HOST_GDTR         10h  ;RT_BIT(4)
  -   %define VMX_RESTORE_HOST_IDTR         20h  ;RT_BIT(5)
  -   %define VMX_RESTORE_HOST_LDTR         40h  ;RT_BIT(6)
  +   %define VMX_RESTORE_HOST_SEL_DS        1h  ;RT_BIT(0)
  +   %define VMX_RESTORE_HOST_SEL_ES        2h  ;RT_BIT(1)
  +   %define VMX_RESTORE_HOST_SEL_FS        4h  ;RT_BIT(2)
  +   %define VMX_RESTORE_HOST_SEL_GS        8h  ;RT_BIT(3)
  +   %define VMX_RESTORE_HOST_SEL_LDTR     10h  ;RT_BIT(4)
  +   %define VMX_RESTORE_HOST_SEL_TR       20h  ;RT_BIT(5)
  +   %define VMX_RESTORE_HOST_GDTR         40h  ;RT_BIT(6)
  +   %define VMX_RESTORE_HOST_IDTR         80h  ;RT_BIT(7)

      struc VMXRESTOREHOST
  …
          .uHostSelFS     resw 1
          .uHostSelGS     resw 1
  +       .uHostSelLDTR   resw 1
  +       .uHostSelTR     resw 1
  +       .u32Padding     resd 1
          .uHostFSBase    resq 1
          .uHostGSBase    resq 1
trunk/include/iprt/asm-amd64-x86.h
r44528 → r46925:


      /**
  +    * Get the LDTR register.
  +    * @returns LDTR.
  +    */
  +   #if RT_INLINE_ASM_EXTERNAL
  +   DECLASM(RTSEL) ASMGetLDTR(void);
  +   #else
  +   DECLINLINE(RTSEL) ASMGetLDTR(void)
  +   {
  +       RTSEL SelLDTR;
  +   # if RT_INLINE_ASM_GNU_STYLE
  +       __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
  +   # else
  +       __asm
  +       {
  +           sldt    ax
  +           mov     [SelLDTR], ax
  +       }
  +   # endif
  +       return SelLDTR;
  +   }
  +   #endif
  +
  +
  +   /**
       * Get the [RE]FLAGS register.
       * @returns [RE]FLAGS.
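The new helper feeds the lazy-restore decision made later in this changeset (see the HMVMXR0.cpp hunk below). A minimal usage sketch, assuming the IPRT headers shown resolve on the include path; the wrapper function name here is hypothetical:

    #include <iprt/types.h>             /* RTSEL, bool */
    #include <iprt/asm-amd64-x86.h>     /* ASMGetLDTR */

    /* True when the host runs with a non-null LDT selector, i.e. the one case where
       LDTR must be restored by hand, since VT-x zeroes it on every VM-exit. */
    static bool hostNeedsLdtrRestore(void)
    {
        RTSEL uSelLDTR = ASMGetLDTR();  /* wraps the SLDT instruction */
        return uSelLDTR != 0;
    }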
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r46905 → r46925:

      .test_es:
          test    edi, VMX_RESTORE_HOST_SEL_ES
  -       jz      near .test_fs
  +       jz      near .test_ldtr
          mov     ax, word [rsi + VMXRESTOREHOST.uHostSelES]
          mov     es, ax
  +
  +   .test_ldtr:
  +       test    edi, VMX_RESTORE_HOST_SEL_LDTR
  +       jz      near .test_tr
  +       mov     ax, word [rsi + VMXRESTOREHOST.uHostSelLDTR]
  +       lldt    ax
  +
  +   .test_tr:
  +       test    edi, VMX_RESTORE_HOST_SEL_TR
  +       jz      near .test_fs
  +       mov     dx, word [rsi + VMXRESTOREHOST.uHostSelTR]
  +       xor     xAX, xAX
  +       mov     ax, dx
  +       and     al, X86_SEL_MASK                                ; Mask away TI and RPL bits leaving only the descriptor offset.
  +       add     xAX, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]  ; xAX <- descriptor offset + GDTR.pGdt.
  +       and     dword [ss:xAX + 4], ~RT_BIT(9)                  ; Clear the busy flag in TSS (bits 0-7=base, bit 9=busy bit).
  +       ltr     dx

      .test_fs:
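For readers less fluent in the macro-assembler dialect, the .test_tr block above can be paraphrased in C roughly as follows. This is an illustrative sketch, not VirtualBox code: the names are made up, the selector mask simply clears the TI and RPL bits as the commit's own comment describes, the busy bit position follows the Intel SDM, and the final LTR can only execute in ring 0:

    #include <stdint.h>

    #define SEL_MASK        0xfff8u     /* strip TI and RPL bits, keep the descriptor-table offset */
    #define DESC_BUSY_BIT   (1u << 9)   /* bit 9 of the descriptor's second dword: TSS busy flag */

    static void restoreHostTr(uint16_t uSelTR, uint8_t *pbGdt)
    {
        /* Locate the TSS descriptor inside the GDT (GDTR base + selector offset). */
        uint32_t *pu32High = (uint32_t *)(pbGdt + (uSelTR & SEL_MASK) + 4);

        /* LTR raises #GP on a busy TSS, so clear the busy flag before reloading. */
        *pu32High &= ~DESC_BUSY_BIT;

        /* The reload itself is the privileged LTR instruction (GCC-style inline asm). */
        __asm__ __volatile__("ltr %w0" : : "r" (uSelTR) : "memory");
    }

The GDT write has to happen first because the host's TSS descriptor is already marked busy (it is the currently loaded TR), and LTR refuses to load a busy descriptor.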
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
r46914 → r46925:

      %ifdef RT_ARCH_AMD64
       %define VMX_SKIP_GDTR_IDTR
  +    %define VMX_SKIP_LDTR_TR
      %endif
      %endif
  …
          push    xSI

  +   %ifndef VMX_SKIP_LDTR_TR
          ; Save LDTR.
          xor     eax, eax
  …
          str     eax
          push    xAX
  +   %endif

  +   %ifndef VMX_SKIP_GDTR_IDTR
          ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
  -   %ifndef VMX_SKIP_GDTR_IDTR
          sub     xSP, xCB * 2
          sgdt    [xSP]
  …

          push    xDI
  -       mov     xDI, [xSP + xCB * 3]    ; pCtx (*3 to skip the saved LDTR + TR).
  +   %ifndef VMX_SKIP_LDTR_TR
  +       mov     xDI, [xSP + xCB * 3]    ; pCtx (*3 to skip the saved xDI, LDTR + TR).
  +   %else
  +       mov     xDI, [xSP + xCB]        ; pCtx
  +   %endif

          mov     [ss:xDI + CPUMCTX.eax], eax
  …
      %endif

  +   %ifndef VMX_SKIP_LDTR_TR
          ; Restore TSS selector; must mark it as not busy before using ltr (!)
          ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
  …
          pop     xAX                     ; Saved LDTR
          lldt    ax
  +   %endif

          add     xSP, xCB                ; pCtx
  …
      %endif

  +   %ifndef VMX_SKIP_LDTR_TR
          ; Restore TSS selector; must mark it as not busy before using ltr (!)
          ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
  …
          pop     xAX                     ; Saved LDTR
          lldt    ax
  +   %endif

      %ifdef VMX_USE_CACHED_VMCS_ACCESSES
  …
      %endif

  +   %ifndef VMX_SKIP_LDTR_TR
          ; Restore TSS selector; must mark it as not busy before using ltr (!)
          ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
  …
          pop     xAX                     ; Saved LDTR
          lldt    ax
  +   %endif

      %ifdef VMX_USE_CACHED_VMCS_ACCESSES
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46900 → r46925:

      {
          int rc = VERR_INTERNAL_ERROR_5;
  -       RTSEL uSelDS   = 0;
  -       RTSEL uSelES   = 0;
  -       RTSEL uSelFS   = 0;
  -       RTSEL uSelGS   = 0;
  -       RTSEL uSelTR   = 0;
  +       RTSEL uSelDS   = 0;
  +       RTSEL uSelES   = 0;
  +       RTSEL uSelFS   = 0;
  +       RTSEL uSelGS   = 0;
  +       RTSEL uSelTR   = 0;
  +       RTSEL uSelLDTR = 0;

          /*
  …
      #if HC_ARCH_BITS == 64
          pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
  -       uSelDS   = ASMGetDS();
  -       uSelES   = ASMGetES();
  -       uSelFS   = ASMGetFS();
  -       uSelGS   = ASMGetGS();
  +       uSelDS   = ASMGetDS();
  +       uSelES   = ASMGetES();
  +       uSelFS   = ASMGetFS();
  +       uSelGS   = ASMGetGS();
  +       uSelLDTR = ASMGetLDTR();
      #endif
  …
              pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
              uSelGS = 0;
  +       }
  +
  +       /*
  +        * VT-x unconditionally writes LDTR to 0 on all VM-exits. If the host has something different, we shall restore it.
  +        * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
  +        */
  +       if (uSelLDTR)
  +       {
  +           pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_LDTR;
  +           pVCpu->hm.s.vmx.RestoreHost.uHostSelLDTR = uSelLDTR;
          }
      #endif
  …
      #if HC_ARCH_BITS == 64
          uTRBase = X86DESC64_BASE(pDesc);
  +
  +       /*
  +        * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
  +        * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
  +        * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
  +        * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
  +        *
  +        * [1] See Intel spec. 3.5 "System Descriptor Types".
  +        * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
  +        */
  +       Assert(pDesc->System.u4Type == 11);
  +       if (   pDesc->System.u16LimitLow != 0x67
  +           || pDesc->System.u4LimitHigh)
  +       {
  +           pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
  +           pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
  +
  +           /* Store the GDTR here as we need it while restoring TR. */
  +           memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
  +       }
      #else
          uTRBase = X86DESC_BASE(pDesc);
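The 0x67 comparison is easier to follow once the descriptor's split limit field is spelled out: the bare 64-bit TSS is 104 (0x68) bytes, so the limit VT-x puts back on VM-exit is 0x67, and any larger limit (typically a TSS extended with an I/O permission bitmap) makes the lazy restore necessary. A hedged, standalone illustration; the type and function names are made up and only the bit widths mirror the real descriptor fields used above:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct
    {
        uint16_t u16LimitLow;       /* limit bits 15:0  */
        unsigned u4LimitHigh : 4;   /* limit bits 19:16 */
    } TSSLIMITBITS;

    /* Restore TR lazily unless the limit is already exactly what VT-x will
       reload on VM-exit (0x67, the bare 104-byte TSS). */
    static bool needsTrLimitRestore(const TSSLIMITBITS *pDesc)
    {
        uint32_t uLimit = ((uint32_t)pDesc->u4LimitHigh << 16) | pDesc->u16LimitLow;
        return uLimit != 0x67;
    }

This is the same condition as the commit's two-part test: u16LimitLow differing from 0x67 or any non-zero upper limit bits both mean the combined limit is not 0x67.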