Changeset 46267 in vbox for trunk/src/VBox/VMM

Timestamp:  May 26, 2013 11:29:24 AM (12 years ago)
Location:   trunk/src/VBox/VMM
Files:      4 edited
Legend: unchanged context lines carry no prefix; lines added in r46267 are prefixed with '+'; removed lines are prefixed with '-'.
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r45955 → r46267)

@@ -58 +58 @@
   %define HM_64_BIT_USE_NULL_SEL
  %endif
+ %endif
+%endif
+
+%ifndef VBOX_WITH_OLD_VTX_CODE
+ %ifdef RT_ARCH_AMD64
+  %define VBOX_SKIP_RESTORE_SEG
  %endif
 %endif

@@ -155 +161 @@
 %endif

+%ifdef VBOX_SKIP_RESTORE_SEG
+ %macro MYPUSHSEGS64 2
+ %endmacro
+
+ %macro MYPOPSEGS64 2
+ %endmacro
+%else ; !VBOX_SKIP_RESTORE_SEG
 ; trashes, rax, rdx & rcx
 %macro MYPUSHSEGS64 2

@@ -218 +231 @@
  %endif
 %endmacro
+%endif ; VBOX_SKIP_RESTORE_SEG

 %macro MYPUSHAD32 0

@@ -270 +284 @@

 BEGINCODE
+
+
+;/**
+; * Restores host-state fields.
+; *
+; * @returns VBox status code
+; * @param u32RestoreHostFlags  x86: [ebp + 08h]  msc: rcx  gcc: rdi   u32RestoreHost - RestoreHost flags.
+; * @param pRestoreHost         x86: [ebp + 0ch]  msc: rdx  gcc: rsi   pRestoreHost - Pointer to the RestoreHost struct.
+; */
+ALIGNCODE(16)
+BEGINPROC VMXRestoreHostState
+%ifdef RT_ARCH_AMD64
+ %ifndef ASM_CALL64_GCC
+    ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
+    mov         r10, rdi
+    mov         r11, rsi
+    ; Switch to common register usage (i.e. gcc's in this function).
+    mov         rdi, rcx
+    mov         rsi, rdx
+ %endif
+
+    test        edi, VMX_RESTORE_HOST_GDTR
+    jz          near .test_idtr
+    lgdt        [rsi + 18h]                 ; pRestoreHost->HostGdtr
+
+.test_idtr:
+    test        edi, VMX_RESTORE_HOST_IDTR
+    jz          near .test_ds
+    lidt        [rsi + 22h]                 ; pRestoreHost->HostIdtr
+
+.test_ds:
+    test        edi, VMX_RESTORE_HOST_SEL_DS
+    jz          near .test_es
+    mov         ax, word [rsi]              ; pRestoreHost->uHostSelDS
+    mov         ds, ax
+
+.test_es:
+    test        edi, VMX_RESTORE_HOST_SEL_ES
+    jz          near .test_fs
+    mov         ax, word [rsi + 2]          ; pRestoreHost->uHostSelES
+    mov         es, ax
+
+.test_fs:
+    ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
+    ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
+    ; writing of the selector as that zaps (trashes) the upper part of the base until we wrmsr the full 64-bit base.
+
+    test        edi, VMX_RESTORE_HOST_SEL_FS
+    jz          near .test_gs
+    mov         ax, word [rsi + 4]          ; pRestoreHost->uHostSelFS
+    cli                                     ; Disable interrupts as mov fs, ax will zap the upper part of the base
+    mov         fs, ax
+    mov         eax, dword [rsi + 8]        ; pRestoreHost->uHostFSBase - Lo
+    mov         edx, dword [rsi + 0Ch]      ; pRestoreHost->uHostFSBase - Hi
+    mov         ecx, MSR_K8_FS_BASE
+    wrmsr
+    sti                                     ; Re-enable interrupts as fsbase is consistent now
+
+.test_gs:
+    test        edi, VMX_RESTORE_HOST_SEL_GS
+    jz          near .restore_success
+    mov         ax, word [rsi + 6]          ; pRestoreHost->uHostSelGS
+    cli                                     ; Disable interrupts as mov gs, ax will zap the upper part of the base
+    mov         gs, ax
+    mov         eax, dword [rsi + 10h]      ; pRestoreHost->uHostGSBase - Lo
+    mov         edx, dword [rsi + 14h]      ; pRestoreHost->uHostGSBase - Hi
+    mov         ecx, MSR_K8_GS_BASE
+    wrmsr
+    sti                                     ; Re-enable interrupts as gsbase is consistent now
+
+.restore_success:
+    mov         eax, VINF_SUCCESS
+ %ifndef ASM_CALL64_GCC
+    ; Restore RDI and RSI on MSC.
+    mov         rdi, r10
+    mov         rsi, r11
+ %endif
+%else  ; RT_ARCH_X86
+    mov         eax, VERR_NOT_IMPLEMENTED
+%endif
+    ret
+ENDPROC VMXRestoreHostState
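For reference, the [rsi + N] offsets hard-coded by VMXRestoreHostState imply the following layout for the structure behind pRestoreHost. This is a sketch reconstructed from the assembly alone, not a copy of the real VMXRESTOREHOST definition elsewhere in the VBox headers; the struct and array field names here are illustrative.

#include <stdint.h>

/* Layout sketch matching the [rsi + N] accesses in VMXRestoreHostState above. */
typedef struct RESTOREHOSTLAYOUT
{
    uint16_t uHostSelDS;       /* 00h: mov ax, word [rsi]         */
    uint16_t uHostSelES;       /* 02h: mov ax, word [rsi + 2]     */
    uint16_t uHostSelFS;       /* 04h: mov ax, word [rsi + 4]     */
    uint16_t uHostSelGS;       /* 06h: mov ax, word [rsi + 6]     */
    uint64_t uHostFSBase;      /* 08h: lo dword at 08h, hi dword at 0Ch */
    uint64_t uHostGSBase;      /* 10h: lo dword at 10h, hi dword at 14h */
    uint8_t  abHostGdtr[10];   /* 18h: 16-bit limit + 64-bit base, loaded by lgdt [rsi + 18h] */
    uint8_t  abHostIdtr[10];   /* 22h: 16-bit limit + 64-bit base, loaded by lidt [rsi + 22h] */
} __attribute__((packed)) RESTOREHOSTLAYOUT;

_Static_assert(sizeof(RESTOREHOSTLAYOUT) == 0x2C, "offsets must match the assembly");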
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac (r46099 → r46267)

@@ -18 +18 @@
 ;

+%ifndef VBOX_WITH_OLD_VTX_CODE
+ %ifdef RT_ARCH_AMD64
+  %define VMX_SKIP_GDTR_IDTR
+ %endif
+%endif

 ;/**

@@ -116 +121 @@

     ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
+%ifndef VMX_SKIP_GDTR_IDTR
     sub     xSP, xS*2
     sgdt    [xSP]

     sub     xSP, xS*2
     sidt    [xSP]
+%endif

 %ifdef VBOX_WITH_DR6_EXPERIMENT

@@ -173 +180 @@

     ; Restore base and limit of the IDTR & GDTR.
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     push    xDI

@@ -263 +272 @@
 .vmxstart_invalid_vmcs_ptr:
     ; Restore base and limit of the IDTR & GDTR
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     ; Restore TSS selector; must mark it as not busy before using ltr (!)

@@ -300 +311 @@
 .vmxstart_start_failed:
     ; Restore base and limit of the IDTR & GDTR.
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     ; Restore TSS selector; must mark it as not busy before using ltr (!)

@@ -437 +450 @@

     ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
+%ifndef VMX_SKIP_GDTR_IDTR
     sub     xSP, xS*2
     sgdt    [xSP]

     sub     xSP, xS*2
     sidt    [xSP]
+%endif

 %ifdef VBOX_WITH_DR6_EXPERIMENT

@@ -502 +517 @@

     ; Restore base and limit of the IDTR & GDTR
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     push    xDI

@@ -609 +626 @@
 .vmxstart64_invalid_vmcs_ptr:
     ; Restore base and limit of the IDTR & GDTR.
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     ; Restore TSS selector; must mark it as not busy before using ltr (!)

@@ -659 +678 @@
 .vmxstart64_start_failed:
     ; Restore base and limit of the IDTR & GDTR.
+%ifndef VMX_SKIP_GDTR_IDTR
     lidt    [xSP]
     add     xSP, xS*2
     lgdt    [xSP]
     add     xSP, xS*2
+%endif

     ; Restore TSS selector; must mark it as not busy before using ltr (!)
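Each sgdt/sidt pair above pushes a 10-byte pseudo-descriptor (16-bit limit plus 64-bit base on AMD64) that the matching lidt/lgdt later pops. A minimal standalone sketch of that format, assuming a 64-bit GCC/Clang build on a CPU/kernel that still permits user-mode sgdt (no UMIP); the struct name is hypothetical, not VBox code:

#include <stdint.h>
#include <stdio.h>

/* The 10-byte pseudo-descriptor stored by sgdt/sidt and loaded by lgdt/lidt:
 * a 16-bit limit followed by a 64-bit base (same shape as VBox's X86XDTR64). */
struct __attribute__((packed)) Xdtr64
{
    uint16_t cb;     /* limit */
    uint64_t pBase;  /* base  */
};

int main(void)
{
    struct Xdtr64 Gdtr = { 0, 0 };
    __asm__ __volatile__("sgdt %0" : "=m" (Gdtr));  /* store the current GDTR */
    /* VT-x resets this limit to 0xffff on every VM-exit, which is exactly what
     * the Gdtr.cbGdt != 0xffff test in HMVMXR0.cpp below checks for. */
    printf("GDT: base=%#llx limit=%#x\n", (unsigned long long)Gdtr.pBase, Gdtr.cb);
    return 0;
}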
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r46192 → r46267)

@@ -2091 +2091 @@
 {
     int rc = VERR_INTERNAL_ERROR_5;
-    RTSEL uSelCS = 0;
-    RTSEL uSelSS = 0;
     RTSEL uSelDS = 0;
     RTSEL uSelES = 0;

@@ -2100 +2098 @@

     /*
-     * Host Selector registers.
+     * Host DS, ES, FS and GS segment registers.
      */
+#if HC_ARCH_BITS == 64
+    pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    uSelDS = ASMGetDS();
+    uSelES = ASMGetES();
+    uSelFS = ASMGetFS();
+    uSelGS = ASMGetGS();
+#endif
+
+    /*
+     * Host CS and SS segment registers.
+     */
+    RTSEL uSelCS;
+    RTSEL uSelSS;
 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())

@@ -2119 +2130 @@
 #endif

-    /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
+    /*
+     * Host TR segment register.
+     */
     uSelTR = ASMGetTR();

+#if HC_ARCH_BITS == 64
+    /*
+     * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them
+     * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
+     */
+    if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
+        uSelDS = 0;
+    }
+    if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
+        uSelES = 0;
+    }
+    if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
+        uSelFS = 0;
+    }
+    if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
+        pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
+        uSelGS = 0;
+    }
+#endif
+
     /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
-    /** @todo Verify if we have any platform that actually run with DS or ES with
-     *        RPL != 0 in kernel space. */
     Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
     Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));

@@ -2144 +2186 @@
     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);      AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);      AssertRCReturn(rc, rc);
-    /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
-#if 0
+#if HC_ARCH_BITS == 64
     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);      AssertRCReturn(rc, rc);
     rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);      AssertRCReturn(rc, rc);
@@ -2156 +2197 @@
     /*
      * Host GDTR and IDTR.
      */
-    /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR it should
-     *        be safe to -not- save and restore GDTR and IDTR in the assembly
-     *        code and just do it here and don't care if the limits are zapped on
-     *        VM-exit. */
     RTGDTR Gdtr;
     RT_ZERO(Gdtr);

@@ -2182 +2219 @@
     rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
     rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
+
+#if HC_ARCH_BITS == 64
+    /*
+     * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
+     * maximum limit (0xffff) on every VM-exit.
+     */
+    if (Gdtr.cbGdt != 0xffff)
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
+        AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
+        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
+    }
+
+    /*
+     * The IDT limit is practically 0xfff. Therefore if the host has the limit as 0xfff, VT-x bloating
+     * the limit to 0xffff is not a problem as it's not possible to get at the extra entries anyway.
+     * See Intel spec. 6.14.1 "64-Bit Mode IDT" and Intel spec. 6.2 "Exception and Interrupt Vectors".
+     */
+    if (Idtr.cbIdt < 0x0fff)
+    {
+        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
+        AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
+        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
+    }
+#endif
 }

@@ -2190 +2252 @@
     if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
     {
-        AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
+        AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
         return VERR_VMX_INVALID_HOST_STATE;

@@ -2217 +2279 @@
     /*
      * Host FS base and GS base.
-     * For 32-bit hosts the base is handled by the assembly code where we push/pop FS and GS which
-     * would take care of the bases. In 64-bit, the MSRs come into play.
      */
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)

@@ -2225 +2285 @@
         uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
         uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
-        rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
-        AssertRCReturn(rc, rc);
-        rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
-        AssertRCReturn(rc, rc);
+        rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);      AssertRCReturn(rc, rc);
+        rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);      AssertRCReturn(rc, rc);
+
+# if HC_ARCH_BITS == 64
+        /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
+        if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
+            pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
+        if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
+            pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
+# endif
     }
 #endif

@@ -5775 +5841 @@
     }

+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
@@ -6425 +6494 @@
      *        we are actually going to be preempted, not all the time like we
      *        currently do. */

+    /* Restore host-state bits that VT-x only restores partially. */
+    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
+    {
+        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    }
+
     /*
      * Sync the current VMCS (writes back internal data back into the VMCS region in memory)

@@ -6740 +6817 @@
 #endif

-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
     TMNotifyStartOfExecution(pVCpu);            /* Finally, notify TM to resume its clocks as we're about
                                                    to start executing. */
+    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
 }
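The uSelXX & (X86_SEL_RPL | X86_SEL_LDT) tests in the hunks above reject any selector that is not a GDT selector with RPL 0, which is what the VM-entry checks require of the VMCS host segment fields. A small standalone illustration; the two constants mirror the usual x86 selector format (RPL in bits 0-1, table indicator in bit 2) and the sample selector values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define X86_SEL_RPL  0x0003u   /* requested privilege level, bits 0-1 */
#define X86_SEL_LDT  0x0004u   /* table indicator: 1 = LDT, 0 = GDT   */

int main(void)
{
    /* e.g. a null selector, a ring-0 GDT selector, and a ring-3 data selector */
    uint16_t aSel[] = { 0x0000, 0x0010, 0x002b };
    for (unsigned i = 0; i < sizeof(aSel) / sizeof(aSel[0]); i++)
        printf("sel %#06x -> %s\n", aSel[i],
               (aSel[i] & (X86_SEL_RPL | X86_SEL_LDT))
               ? "unsuitable as VMCS host selector; stash it and restore lazily"
               : "ok to keep in the VMCS");
    return 0;
}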
trunk/src/VBox/VMM/include/HMInternal.h (r46192 → r46267)

@@ -682 +682 @@
     PGMMODE                     enmPrevGuestMode;
 #else
+    uint32_t                    fRestoreHostFlags;
+    VMXRESTOREHOST              RestoreHost;
     /** Set if guest was executing in real mode (extra checks). */
     bool                        fWasInRealMode;
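As a worked check on the Idtr.cbIdt < 0x0fff test added in HMVMXR0.cpp above: a 64-bit IDT gate descriptor is 16 bytes and there are at most 256 vectors, so the largest limit a host IDT can usefully have is 256 * 16 - 1 = 0xfff. A trivial sketch confirming the arithmetic:

#include <stdio.h>

int main(void)
{
    enum { cMaxVectors = 256, cbIdtGate64 = 16 };  /* 64-bit mode: 16-byte gates, 256 vectors */
    _Static_assert(cMaxVectors * cbIdtGate64 - 1 == 0xfff, "largest meaningful 64-bit IDT limit");
    printf("max useful IDT limit = %#x\n", cMaxVectors * cbIdtGate64 - 1);
    return 0;
}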