Changeset 47439 in vbox for trunk/src/VBox/VMM
- Timestamp:
- Jul 27, 2013 8:03:21 PM (12 years ago)
- Location:
- trunk/src/VBox/VMM/VMMR0
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r47123 → r47439

@@ lines 286-291 @@
 ; *
 ; * @returns VBox status code
-; * @param u32RestoreHostFlags  x86: [ebp + 08h]  msc: rcx  gcc: rdi  u32RestoreHost - RestoreHost flags.
-; * @param pRestoreHost         x86: [ebp + 0ch]  msc: rdx  gcc: rsi  pRestoreHost - Pointer to the RestoreHost struct.
+; * @param f32RestoreHost       x86: [ebp + 08h]  msc: ecx  gcc: edi  RestoreHost flags.
+; * @param pRestoreHost         x86: [ebp + 0ch]  msc: rdx  gcc: rsi  Pointer to the RestoreHost struct.
 ; */
 ALIGNCODE(16)

@@ lines 293-300 @@
 %ifdef RT_ARCH_AMD64
 %ifndef ASM_CALL64_GCC
-    ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
+    ; Use GCC's input registers since we'll be needing both rcx and rdx further
+    ; down with the wrmsr instruction. Use the R10 and R11 register for saving
+    ; RDI and RSI since MSC preserve the two latter registers.
     mov r10, rdi
     mov r11, rsi
-    ; Switch to common register usage (i.e. gcc's in this function)
     mov rdi, rcx
     mov rsi, rdx

@@ lines 303-373 @@
     test edi, VMX_RESTORE_HOST_GDTR
-    jz near .test_idtr
+    jz .test_idtr
     lgdt [rsi + VMXRESTOREHOST.HostGdtr]

 .test_idtr:
     test edi, VMX_RESTORE_HOST_IDTR
-    jz near .test_ds
+    jz .test_ds
     lidt [rsi + VMXRESTOREHOST.HostIdtr]

 .test_ds:
     test edi, VMX_RESTORE_HOST_SEL_DS
-    jz near .test_es
-    mov ax, word [rsi + VMXRESTOREHOST.uHostSelDS]
+    jz .test_es
+    mov ax, [rsi + VMXRESTOREHOST.uHostSelDS]
     mov ds, ax

 .test_es:
     test edi, VMX_RESTORE_HOST_SEL_ES
-    jz near .test_tr
-    mov ax, word [rsi + VMXRESTOREHOST.uHostSelES]
+    jz .test_tr
+    mov ax, [rsi + VMXRESTOREHOST.uHostSelES]
     mov es, ax

 .test_tr:
     test edi, VMX_RESTORE_HOST_SEL_TR
-    jz near .test_fs
-    mov dx, word [rsi + VMXRESTOREHOST.uHostSelTR]
-    xor xAX, xAX
+    jz .test_fs
+    ; When restoring the TR, we must first clear the busy flag or we'll end up faulting.
+    mov dx, [rsi + VMXRESTOREHOST.uHostSelTR]
     mov ax, dx
-    and al, ~(X86_SEL_LDT | X86_SEL_RPL)                 ; Mask away TI and RPL bits leaving only the descriptor offset.
-    add xAX, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]   ; xAX <- descriptor offset + GDTR.pGdt.
-    and dword [ss:xAX + 4], ~RT_BIT(9)                   ; Clear the busy flag in TSS (bits 0-7=base, bit 9=busy bit).
+    and eax, X86_SEL_MASK_OFF_RPL                        ; Mask away TI and RPL bits leaving only the descriptor offset.
+    add rax, qword [rsi + VMXRESTOREHOST.HostGdtr + 2]   ; xAX <- descriptor offset + GDTR.pGdt.
+    and dword [rax + 4], ~RT_BIT(9)                      ; Clear the busy flag in TSS desc (bits 0-7=base, bit 9=busy bit).
     ltr dx

 .test_fs:
-    ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
-    ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
-    ; writing of the selector as that zaps (trashes) the upper-part of the base until we wrmsr the full 64-bit base.
+    ;
+    ; When restoring the selector values for FS and GS, we'll temporarily trash
+    ; the base address (at least the high 32-bit bits, but quite possibly the
+    ; whole base address), the wrmsr will restore it correctly. (VT-x actually
+    ; restores the base correctly when leaving guest mode, but not the selector
+    ; value, so there is little problem with interrupts being enabled prior to
+    ; this restore job.)
+    ; We'll disable ints once for both FS and GS as that's probably faster.
+    ;
+    test edi, VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_SEL_GS
+    jz .restore_success
+    pushfq
+    cli                                     ; (see above)

     test edi, VMX_RESTORE_HOST_SEL_FS
-    jz near .test_gs
+    jz .test_gs
     mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
-    cli                                     ; Disable interrupts as mov fs, ax will zap the upper part of the base
     mov fs, ax
     mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase]    ; uHostFSBase - Lo
[...]
     mov ecx, MSR_K8_FS_BASE
     wrmsr
-    sti                                     ; Re-enable interrupts as fsbase is consistent now

+    test edi, VMX_RESTORE_HOST_SEL_GS
+    jz .restore_flags
 .test_gs:
-    test edi, VMX_RESTORE_HOST_SEL_GS
-    jz near .restore_success
     mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
-    cli                                     ; Disable interrupts as mov gs, ax will zap the upper part of the base
     mov gs, ax
     mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase]    ; uHostGSBase - Lo
[...]
     mov ecx, MSR_K8_GS_BASE
     wrmsr
-    sti                                     ; Re-enable interrupts as gsbase is consistent now
+
+.restore_flags:
+    popfq

 .restore_success:
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r47434 → r47439

@@ lines 6667-6673 @@
 {
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo r=ramshankar: This is broken when
-     *  VBOX_WITH_VMMR0_DISABLE_PREEMPTION is not defined. As
-     *  VMXRestoreHostState() may unconditionally enables interrupts. */
-#error "VMM: Fix Me! Make VMXRestoreHostState() function to skip cli/sti."
-#else
     Assert(ASMIntAreEnabled());
+#endif
     VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
-#endif
     pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
 }
Note:
See TracChangeset
for help on using the changeset viewer.